1 /* 2 BlueZ - Bluetooth protocol stack for Linux 3 Copyright (C) 2000-2001 Qualcomm Incorporated 4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org> 5 Copyright (C) 2010 Google Inc. 6 Copyright (C) 2011 ProFUSION Embedded Systems 7 8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> 9 10 This program is free software; you can redistribute it and/or modify 11 it under the terms of the GNU General Public License version 2 as 12 published by the Free Software Foundation; 13 14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 22 23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 25 SOFTWARE IS DISCLAIMED. 26 */ 27 28 /* Bluetooth L2CAP core. */ 29 30 #include <linux/module.h> 31 32 #include <linux/types.h> 33 #include <linux/capability.h> 34 #include <linux/errno.h> 35 #include <linux/kernel.h> 36 #include <linux/sched.h> 37 #include <linux/slab.h> 38 #include <linux/poll.h> 39 #include <linux/fcntl.h> 40 #include <linux/init.h> 41 #include <linux/interrupt.h> 42 #include <linux/socket.h> 43 #include <linux/skbuff.h> 44 #include <linux/list.h> 45 #include <linux/device.h> 46 #include <linux/debugfs.h> 47 #include <linux/seq_file.h> 48 #include <linux/uaccess.h> 49 #include <linux/crc16.h> 50 #include <net/sock.h> 51 52 #include <asm/system.h> 53 #include <asm/unaligned.h> 54 55 #include <net/bluetooth/bluetooth.h> 56 #include <net/bluetooth/hci_core.h> 57 #include <net/bluetooth/l2cap.h> 58 #include <net/bluetooth/smp.h> 59 60 bool disable_ertm; 61 62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN; 63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, }; 64 65 static LIST_HEAD(chan_list); 66 static DEFINE_RWLOCK(chan_list_lock); 67 68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, 69 u8 code, u8 ident, u16 dlen, void *data); 70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, 71 void *data); 72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data); 73 static void l2cap_send_disconn_req(struct l2cap_conn *conn, 74 struct l2cap_chan *chan, int err); 75 76 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb); 77 78 /* ---- L2CAP channels ---- */ 79 80 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid) 81 { 82 struct l2cap_chan *c, *r = NULL; 83 84 rcu_read_lock(); 85 86 list_for_each_entry_rcu(c, &conn->chan_l, list) { 87 if (c->dcid == cid) { 88 r = c; 89 break; 90 } 91 } 92 93 rcu_read_unlock(); 94 return r; 95 } 96 97 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid) 98 { 99 struct l2cap_chan *c, *r = NULL; 100 101 rcu_read_lock(); 102 103 list_for_each_entry_rcu(c, &conn->chan_l, list) { 104 if (c->scid == cid) { 105 r = c; 106 break; 107 } 108 } 109 110 rcu_read_unlock(); 111 return r; 112 } 113 114 /* Find channel with given SCID. 115 * Returns locked socket */ 116 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid) 117 { 118 struct l2cap_chan *c; 119 120 c = __l2cap_get_chan_by_scid(conn, cid); 121 if (c) 122 lock_sock(c->sk); 123 return c; 124 } 125 126 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident) 127 { 128 struct l2cap_chan *c, *r = NULL; 129 130 rcu_read_lock(); 131 132 list_for_each_entry_rcu(c, &conn->chan_l, list) { 133 if (c->ident == ident) { 134 r = c; 135 break; 136 } 137 } 138 139 rcu_read_unlock(); 140 return r; 141 } 142 143 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident) 144 { 145 struct l2cap_chan *c; 146 147 c = __l2cap_get_chan_by_ident(conn, ident); 148 if (c) 149 lock_sock(c->sk); 150 return c; 151 } 152 153 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src) 154 { 155 struct l2cap_chan *c; 156 157 list_for_each_entry(c, &chan_list, global_l) { 158 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src)) 159 return c; 160 } 161 return NULL; 162 } 163 164 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm) 165 { 166 int err; 167 168 write_lock(&chan_list_lock); 169 170 if (psm && __l2cap_global_chan_by_addr(psm, src)) { 171 err = -EADDRINUSE; 172 goto done; 173 } 174 175 if (psm) { 176 chan->psm = psm; 177 chan->sport = psm; 178 err = 0; 179 } else { 180 u16 p; 181 182 err = -EINVAL; 183 for (p = 0x1001; p < 0x1100; p += 2) 184 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) { 185 chan->psm = cpu_to_le16(p); 186 chan->sport = cpu_to_le16(p); 187 err = 0; 188 break; 189 } 190 } 191 192 done: 193 write_unlock(&chan_list_lock); 194 return err; 195 } 196 197 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid) 198 { 199 write_lock(&chan_list_lock); 200 201 chan->scid = scid; 202 203 write_unlock(&chan_list_lock); 204 205 return 0; 206 } 207 208 static u16 l2cap_alloc_cid(struct l2cap_conn *conn) 209 { 210 u16 cid = L2CAP_CID_DYN_START; 211 212 for (; cid < L2CAP_CID_DYN_END; cid++) { 213 if (!__l2cap_get_chan_by_scid(conn, cid)) 214 return cid; 215 } 216 217 return 0; 218 } 219 220 static char *state_to_string(int state) 221 { 222 switch(state) { 223 case BT_CONNECTED: 224 return "BT_CONNECTED"; 225 case BT_OPEN: 226 return "BT_OPEN"; 227 case BT_BOUND: 228 return "BT_BOUND"; 229 case BT_LISTEN: 230 return "BT_LISTEN"; 231 case BT_CONNECT: 232 return "BT_CONNECT"; 233 case BT_CONNECT2: 234 return "BT_CONNECT2"; 235 case BT_CONFIG: 236 return "BT_CONFIG"; 237 case BT_DISCONN: 238 return "BT_DISCONN"; 239 case BT_CLOSED: 240 return "BT_CLOSED"; 241 } 242 243 return "invalid state"; 244 } 245 246 static void l2cap_state_change(struct l2cap_chan *chan, int state) 247 { 248 BT_DBG("%p %s -> %s", chan, state_to_string(chan->state), 249 state_to_string(state)); 250 251 chan->state = state; 252 chan->ops->state_change(chan->data, state); 253 } 254 255 static void l2cap_chan_timeout(struct work_struct *work) 256 { 257 struct l2cap_chan *chan = container_of(work, struct l2cap_chan, 258 chan_timer.work); 259 struct sock *sk = chan->sk; 260 int reason; 261 262 BT_DBG("chan %p state %d", chan, chan->state); 263 264 lock_sock(sk); 265 266 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG) 267 reason = ECONNREFUSED; 268 else if (chan->state == BT_CONNECT && 269 chan->sec_level != BT_SECURITY_SDP) 270 reason = ECONNREFUSED; 271 else 272 reason = ETIMEDOUT; 273 274 l2cap_chan_close(chan, reason); 275 276 release_sock(sk); 277 278 chan->ops->close(chan->data); 279 l2cap_chan_put(chan); 280 } 281 282 struct l2cap_chan *l2cap_chan_create(struct sock *sk) 283 { 284 struct l2cap_chan *chan; 285 286 chan = kzalloc(sizeof(*chan), GFP_ATOMIC); 287 if (!chan) 288 return NULL; 289 290 chan->sk = sk; 291 292 write_lock(&chan_list_lock); 293 list_add(&chan->global_l, &chan_list); 294 write_unlock(&chan_list_lock); 295 296 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout); 297 298 chan->state = BT_OPEN; 299 300 atomic_set(&chan->refcnt, 1); 301 302 BT_DBG("sk %p chan %p", sk, chan); 303 304 return chan; 305 } 306 307 void l2cap_chan_destroy(struct l2cap_chan *chan) 308 { 309 write_lock(&chan_list_lock); 310 list_del(&chan->global_l); 311 write_unlock(&chan_list_lock); 312 313 l2cap_chan_put(chan); 314 } 315 316 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) 317 { 318 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, 319 chan->psm, chan->dcid); 320 321 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM; 322 323 chan->conn = conn; 324 325 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) { 326 if (conn->hcon->type == LE_LINK) { 327 /* LE connection */ 328 chan->omtu = L2CAP_LE_DEFAULT_MTU; 329 chan->scid = L2CAP_CID_LE_DATA; 330 chan->dcid = L2CAP_CID_LE_DATA; 331 } else { 332 /* Alloc CID for connection-oriented socket */ 333 chan->scid = l2cap_alloc_cid(conn); 334 chan->omtu = L2CAP_DEFAULT_MTU; 335 } 336 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) { 337 /* Connectionless socket */ 338 chan->scid = L2CAP_CID_CONN_LESS; 339 chan->dcid = L2CAP_CID_CONN_LESS; 340 chan->omtu = L2CAP_DEFAULT_MTU; 341 } else { 342 /* Raw socket can send/recv signalling messages only */ 343 chan->scid = L2CAP_CID_SIGNALING; 344 chan->dcid = L2CAP_CID_SIGNALING; 345 chan->omtu = L2CAP_DEFAULT_MTU; 346 } 347 348 chan->local_id = L2CAP_BESTEFFORT_ID; 349 chan->local_stype = L2CAP_SERV_BESTEFFORT; 350 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE; 351 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME; 352 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT; 353 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO; 354 355 l2cap_chan_hold(chan); 356 357 list_add_rcu(&chan->list, &conn->chan_l); 358 } 359 360 /* Delete channel. 361 * Must be called on the locked socket. */ 362 static void l2cap_chan_del(struct l2cap_chan *chan, int err) 363 { 364 struct sock *sk = chan->sk; 365 struct l2cap_conn *conn = chan->conn; 366 struct sock *parent = bt_sk(sk)->parent; 367 368 __clear_chan_timer(chan); 369 370 BT_DBG("chan %p, conn %p, err %d", chan, conn, err); 371 372 if (conn) { 373 /* Delete from channel list */ 374 list_del_rcu(&chan->list); 375 synchronize_rcu(); 376 377 l2cap_chan_put(chan); 378 379 chan->conn = NULL; 380 hci_conn_put(conn->hcon); 381 } 382 383 l2cap_state_change(chan, BT_CLOSED); 384 sock_set_flag(sk, SOCK_ZAPPED); 385 386 if (err) 387 sk->sk_err = err; 388 389 if (parent) { 390 bt_accept_unlink(sk); 391 parent->sk_data_ready(parent, 0); 392 } else 393 sk->sk_state_change(sk); 394 395 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) && 396 test_bit(CONF_INPUT_DONE, &chan->conf_state))) 397 return; 398 399 skb_queue_purge(&chan->tx_q); 400 401 if (chan->mode == L2CAP_MODE_ERTM) { 402 struct srej_list *l, *tmp; 403 404 __clear_retrans_timer(chan); 405 __clear_monitor_timer(chan); 406 __clear_ack_timer(chan); 407 408 skb_queue_purge(&chan->srej_q); 409 410 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) { 411 list_del(&l->list); 412 kfree(l); 413 } 414 } 415 } 416 417 static void l2cap_chan_cleanup_listen(struct sock *parent) 418 { 419 struct sock *sk; 420 421 BT_DBG("parent %p", parent); 422 423 /* Close not yet accepted channels */ 424 while ((sk = bt_accept_dequeue(parent, NULL))) { 425 struct l2cap_chan *chan = l2cap_pi(sk)->chan; 426 __clear_chan_timer(chan); 427 lock_sock(sk); 428 l2cap_chan_close(chan, ECONNRESET); 429 release_sock(sk); 430 chan->ops->close(chan->data); 431 } 432 } 433 434 void l2cap_chan_close(struct l2cap_chan *chan, int reason) 435 { 436 struct l2cap_conn *conn = chan->conn; 437 struct sock *sk = chan->sk; 438 439 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket); 440 441 switch (chan->state) { 442 case BT_LISTEN: 443 l2cap_chan_cleanup_listen(sk); 444 445 l2cap_state_change(chan, BT_CLOSED); 446 sock_set_flag(sk, SOCK_ZAPPED); 447 break; 448 449 case BT_CONNECTED: 450 case BT_CONFIG: 451 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && 452 conn->hcon->type == ACL_LINK) { 453 __clear_chan_timer(chan); 454 __set_chan_timer(chan, sk->sk_sndtimeo); 455 l2cap_send_disconn_req(conn, chan, reason); 456 } else 457 l2cap_chan_del(chan, reason); 458 break; 459 460 case BT_CONNECT2: 461 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && 462 conn->hcon->type == ACL_LINK) { 463 struct l2cap_conn_rsp rsp; 464 __u16 result; 465 466 if (bt_sk(sk)->defer_setup) 467 result = L2CAP_CR_SEC_BLOCK; 468 else 469 result = L2CAP_CR_BAD_PSM; 470 l2cap_state_change(chan, BT_DISCONN); 471 472 rsp.scid = cpu_to_le16(chan->dcid); 473 rsp.dcid = cpu_to_le16(chan->scid); 474 rsp.result = cpu_to_le16(result); 475 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); 476 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, 477 sizeof(rsp), &rsp); 478 } 479 480 l2cap_chan_del(chan, reason); 481 break; 482 483 case BT_CONNECT: 484 case BT_DISCONN: 485 l2cap_chan_del(chan, reason); 486 break; 487 488 default: 489 sock_set_flag(sk, SOCK_ZAPPED); 490 break; 491 } 492 } 493 494 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan) 495 { 496 if (chan->chan_type == L2CAP_CHAN_RAW) { 497 switch (chan->sec_level) { 498 case BT_SECURITY_HIGH: 499 return HCI_AT_DEDICATED_BONDING_MITM; 500 case BT_SECURITY_MEDIUM: 501 return HCI_AT_DEDICATED_BONDING; 502 default: 503 return HCI_AT_NO_BONDING; 504 } 505 } else if (chan->psm == cpu_to_le16(0x0001)) { 506 if (chan->sec_level == BT_SECURITY_LOW) 507 chan->sec_level = BT_SECURITY_SDP; 508 509 if (chan->sec_level == BT_SECURITY_HIGH) 510 return HCI_AT_NO_BONDING_MITM; 511 else 512 return HCI_AT_NO_BONDING; 513 } else { 514 switch (chan->sec_level) { 515 case BT_SECURITY_HIGH: 516 return HCI_AT_GENERAL_BONDING_MITM; 517 case BT_SECURITY_MEDIUM: 518 return HCI_AT_GENERAL_BONDING; 519 default: 520 return HCI_AT_NO_BONDING; 521 } 522 } 523 } 524 525 /* Service level security */ 526 int l2cap_chan_check_security(struct l2cap_chan *chan) 527 { 528 struct l2cap_conn *conn = chan->conn; 529 __u8 auth_type; 530 531 auth_type = l2cap_get_auth_type(chan); 532 533 return hci_conn_security(conn->hcon, chan->sec_level, auth_type); 534 } 535 536 static u8 l2cap_get_ident(struct l2cap_conn *conn) 537 { 538 u8 id; 539 540 /* Get next available identificator. 541 * 1 - 128 are used by kernel. 542 * 129 - 199 are reserved. 543 * 200 - 254 are used by utilities like l2ping, etc. 544 */ 545 546 spin_lock(&conn->lock); 547 548 if (++conn->tx_ident > 128) 549 conn->tx_ident = 1; 550 551 id = conn->tx_ident; 552 553 spin_unlock(&conn->lock); 554 555 return id; 556 } 557 558 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data) 559 { 560 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data); 561 u8 flags; 562 563 BT_DBG("code 0x%2.2x", code); 564 565 if (!skb) 566 return; 567 568 if (lmp_no_flush_capable(conn->hcon->hdev)) 569 flags = ACL_START_NO_FLUSH; 570 else 571 flags = ACL_START; 572 573 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON; 574 skb->priority = HCI_PRIO_MAX; 575 576 hci_send_acl(conn->hchan, skb, flags); 577 } 578 579 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb) 580 { 581 struct hci_conn *hcon = chan->conn->hcon; 582 u16 flags; 583 584 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len, 585 skb->priority); 586 587 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) && 588 lmp_no_flush_capable(hcon->hdev)) 589 flags = ACL_START_NO_FLUSH; 590 else 591 flags = ACL_START; 592 593 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags); 594 hci_send_acl(chan->conn->hchan, skb, flags); 595 } 596 597 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control) 598 { 599 struct sk_buff *skb; 600 struct l2cap_hdr *lh; 601 struct l2cap_conn *conn = chan->conn; 602 int count, hlen; 603 604 if (chan->state != BT_CONNECTED) 605 return; 606 607 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) 608 hlen = L2CAP_EXT_HDR_SIZE; 609 else 610 hlen = L2CAP_ENH_HDR_SIZE; 611 612 if (chan->fcs == L2CAP_FCS_CRC16) 613 hlen += L2CAP_FCS_SIZE; 614 615 BT_DBG("chan %p, control 0x%8.8x", chan, control); 616 617 count = min_t(unsigned int, conn->mtu, hlen); 618 619 control |= __set_sframe(chan); 620 621 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) 622 control |= __set_ctrl_final(chan); 623 624 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state)) 625 control |= __set_ctrl_poll(chan); 626 627 skb = bt_skb_alloc(count, GFP_ATOMIC); 628 if (!skb) 629 return; 630 631 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 632 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE); 633 lh->cid = cpu_to_le16(chan->dcid); 634 635 __put_control(chan, control, skb_put(skb, __ctrl_size(chan))); 636 637 if (chan->fcs == L2CAP_FCS_CRC16) { 638 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE); 639 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); 640 } 641 642 skb->priority = HCI_PRIO_MAX; 643 l2cap_do_send(chan, skb); 644 } 645 646 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control) 647 { 648 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 649 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); 650 set_bit(CONN_RNR_SENT, &chan->conn_state); 651 } else 652 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); 653 654 control |= __set_reqseq(chan, chan->buffer_seq); 655 656 l2cap_send_sframe(chan, control); 657 } 658 659 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan) 660 { 661 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state); 662 } 663 664 static void l2cap_do_start(struct l2cap_chan *chan) 665 { 666 struct l2cap_conn *conn = chan->conn; 667 668 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) { 669 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) 670 return; 671 672 if (l2cap_chan_check_security(chan) && 673 __l2cap_no_conn_pending(chan)) { 674 struct l2cap_conn_req req; 675 req.scid = cpu_to_le16(chan->scid); 676 req.psm = chan->psm; 677 678 chan->ident = l2cap_get_ident(conn); 679 set_bit(CONF_CONNECT_PEND, &chan->conf_state); 680 681 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, 682 sizeof(req), &req); 683 } 684 } else { 685 struct l2cap_info_req req; 686 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); 687 688 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; 689 conn->info_ident = l2cap_get_ident(conn); 690 691 schedule_delayed_work(&conn->info_timer, 692 msecs_to_jiffies(L2CAP_INFO_TIMEOUT)); 693 694 l2cap_send_cmd(conn, conn->info_ident, 695 L2CAP_INFO_REQ, sizeof(req), &req); 696 } 697 } 698 699 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask) 700 { 701 u32 local_feat_mask = l2cap_feat_mask; 702 if (!disable_ertm) 703 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING; 704 705 switch (mode) { 706 case L2CAP_MODE_ERTM: 707 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask; 708 case L2CAP_MODE_STREAMING: 709 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask; 710 default: 711 return 0x00; 712 } 713 } 714 715 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err) 716 { 717 struct sock *sk; 718 struct l2cap_disconn_req req; 719 720 if (!conn) 721 return; 722 723 sk = chan->sk; 724 725 if (chan->mode == L2CAP_MODE_ERTM) { 726 __clear_retrans_timer(chan); 727 __clear_monitor_timer(chan); 728 __clear_ack_timer(chan); 729 } 730 731 req.dcid = cpu_to_le16(chan->dcid); 732 req.scid = cpu_to_le16(chan->scid); 733 l2cap_send_cmd(conn, l2cap_get_ident(conn), 734 L2CAP_DISCONN_REQ, sizeof(req), &req); 735 736 l2cap_state_change(chan, BT_DISCONN); 737 sk->sk_err = err; 738 } 739 740 /* ---- L2CAP connections ---- */ 741 static void l2cap_conn_start(struct l2cap_conn *conn) 742 { 743 struct l2cap_chan *chan; 744 745 BT_DBG("conn %p", conn); 746 747 rcu_read_lock(); 748 749 list_for_each_entry_rcu(chan, &conn->chan_l, list) { 750 struct sock *sk = chan->sk; 751 752 bh_lock_sock(sk); 753 754 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { 755 bh_unlock_sock(sk); 756 continue; 757 } 758 759 if (chan->state == BT_CONNECT) { 760 struct l2cap_conn_req req; 761 762 if (!l2cap_chan_check_security(chan) || 763 !__l2cap_no_conn_pending(chan)) { 764 bh_unlock_sock(sk); 765 continue; 766 } 767 768 if (!l2cap_mode_supported(chan->mode, conn->feat_mask) 769 && test_bit(CONF_STATE2_DEVICE, 770 &chan->conf_state)) { 771 /* l2cap_chan_close() calls list_del(chan) 772 * so release the lock */ 773 l2cap_chan_close(chan, ECONNRESET); 774 bh_unlock_sock(sk); 775 continue; 776 } 777 778 req.scid = cpu_to_le16(chan->scid); 779 req.psm = chan->psm; 780 781 chan->ident = l2cap_get_ident(conn); 782 set_bit(CONF_CONNECT_PEND, &chan->conf_state); 783 784 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, 785 sizeof(req), &req); 786 787 } else if (chan->state == BT_CONNECT2) { 788 struct l2cap_conn_rsp rsp; 789 char buf[128]; 790 rsp.scid = cpu_to_le16(chan->dcid); 791 rsp.dcid = cpu_to_le16(chan->scid); 792 793 if (l2cap_chan_check_security(chan)) { 794 if (bt_sk(sk)->defer_setup) { 795 struct sock *parent = bt_sk(sk)->parent; 796 rsp.result = cpu_to_le16(L2CAP_CR_PEND); 797 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND); 798 if (parent) 799 parent->sk_data_ready(parent, 0); 800 801 } else { 802 l2cap_state_change(chan, BT_CONFIG); 803 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); 804 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); 805 } 806 } else { 807 rsp.result = cpu_to_le16(L2CAP_CR_PEND); 808 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND); 809 } 810 811 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, 812 sizeof(rsp), &rsp); 813 814 if (test_bit(CONF_REQ_SENT, &chan->conf_state) || 815 rsp.result != L2CAP_CR_SUCCESS) { 816 bh_unlock_sock(sk); 817 continue; 818 } 819 820 set_bit(CONF_REQ_SENT, &chan->conf_state); 821 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 822 l2cap_build_conf_req(chan, buf), buf); 823 chan->num_conf_req++; 824 } 825 826 bh_unlock_sock(sk); 827 } 828 829 rcu_read_unlock(); 830 } 831 832 /* Find socket with cid and source bdaddr. 833 * Returns closest match, locked. 834 */ 835 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src) 836 { 837 struct l2cap_chan *c, *c1 = NULL; 838 839 read_lock(&chan_list_lock); 840 841 list_for_each_entry(c, &chan_list, global_l) { 842 struct sock *sk = c->sk; 843 844 if (state && c->state != state) 845 continue; 846 847 if (c->scid == cid) { 848 /* Exact match. */ 849 if (!bacmp(&bt_sk(sk)->src, src)) { 850 read_unlock(&chan_list_lock); 851 return c; 852 } 853 854 /* Closest match */ 855 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) 856 c1 = c; 857 } 858 } 859 860 read_unlock(&chan_list_lock); 861 862 return c1; 863 } 864 865 static void l2cap_le_conn_ready(struct l2cap_conn *conn) 866 { 867 struct sock *parent, *sk; 868 struct l2cap_chan *chan, *pchan; 869 870 BT_DBG(""); 871 872 /* Check if we have socket listening on cid */ 873 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA, 874 conn->src); 875 if (!pchan) 876 return; 877 878 parent = pchan->sk; 879 880 lock_sock(parent); 881 882 /* Check for backlog size */ 883 if (sk_acceptq_is_full(parent)) { 884 BT_DBG("backlog full %d", parent->sk_ack_backlog); 885 goto clean; 886 } 887 888 chan = pchan->ops->new_connection(pchan->data); 889 if (!chan) 890 goto clean; 891 892 sk = chan->sk; 893 894 hci_conn_hold(conn->hcon); 895 896 bacpy(&bt_sk(sk)->src, conn->src); 897 bacpy(&bt_sk(sk)->dst, conn->dst); 898 899 bt_accept_enqueue(parent, sk); 900 901 l2cap_chan_add(conn, chan); 902 903 __set_chan_timer(chan, sk->sk_sndtimeo); 904 905 l2cap_state_change(chan, BT_CONNECTED); 906 parent->sk_data_ready(parent, 0); 907 908 clean: 909 release_sock(parent); 910 } 911 912 static void l2cap_chan_ready(struct sock *sk) 913 { 914 struct l2cap_chan *chan = l2cap_pi(sk)->chan; 915 struct sock *parent = bt_sk(sk)->parent; 916 917 BT_DBG("sk %p, parent %p", sk, parent); 918 919 chan->conf_state = 0; 920 __clear_chan_timer(chan); 921 922 l2cap_state_change(chan, BT_CONNECTED); 923 sk->sk_state_change(sk); 924 925 if (parent) 926 parent->sk_data_ready(parent, 0); 927 } 928 929 static void l2cap_conn_ready(struct l2cap_conn *conn) 930 { 931 struct l2cap_chan *chan; 932 933 BT_DBG("conn %p", conn); 934 935 if (!conn->hcon->out && conn->hcon->type == LE_LINK) 936 l2cap_le_conn_ready(conn); 937 938 if (conn->hcon->out && conn->hcon->type == LE_LINK) 939 smp_conn_security(conn, conn->hcon->pending_sec_level); 940 941 rcu_read_lock(); 942 943 list_for_each_entry_rcu(chan, &conn->chan_l, list) { 944 struct sock *sk = chan->sk; 945 946 bh_lock_sock(sk); 947 948 if (conn->hcon->type == LE_LINK) { 949 if (smp_conn_security(conn, chan->sec_level)) 950 l2cap_chan_ready(sk); 951 952 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { 953 __clear_chan_timer(chan); 954 l2cap_state_change(chan, BT_CONNECTED); 955 sk->sk_state_change(sk); 956 957 } else if (chan->state == BT_CONNECT) 958 l2cap_do_start(chan); 959 960 bh_unlock_sock(sk); 961 } 962 963 rcu_read_unlock(); 964 } 965 966 /* Notify sockets that we cannot guaranty reliability anymore */ 967 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err) 968 { 969 struct l2cap_chan *chan; 970 971 BT_DBG("conn %p", conn); 972 973 rcu_read_lock(); 974 975 list_for_each_entry_rcu(chan, &conn->chan_l, list) { 976 struct sock *sk = chan->sk; 977 978 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags)) 979 sk->sk_err = err; 980 } 981 982 rcu_read_unlock(); 983 } 984 985 static void l2cap_info_timeout(struct work_struct *work) 986 { 987 struct l2cap_conn *conn = container_of(work, struct l2cap_conn, 988 info_timer.work); 989 990 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; 991 conn->info_ident = 0; 992 993 l2cap_conn_start(conn); 994 } 995 996 static void l2cap_conn_del(struct hci_conn *hcon, int err) 997 { 998 struct l2cap_conn *conn = hcon->l2cap_data; 999 struct l2cap_chan *chan, *l; 1000 struct sock *sk; 1001 1002 if (!conn) 1003 return; 1004 1005 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err); 1006 1007 kfree_skb(conn->rx_skb); 1008 1009 /* Kill channels */ 1010 list_for_each_entry_safe(chan, l, &conn->chan_l, list) { 1011 sk = chan->sk; 1012 lock_sock(sk); 1013 l2cap_chan_del(chan, err); 1014 release_sock(sk); 1015 chan->ops->close(chan->data); 1016 } 1017 1018 hci_chan_del(conn->hchan); 1019 1020 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) 1021 cancel_delayed_work_sync(&conn->info_timer); 1022 1023 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) { 1024 cancel_delayed_work_sync(&conn->security_timer); 1025 smp_chan_destroy(conn); 1026 } 1027 1028 hcon->l2cap_data = NULL; 1029 kfree(conn); 1030 } 1031 1032 static void security_timeout(struct work_struct *work) 1033 { 1034 struct l2cap_conn *conn = container_of(work, struct l2cap_conn, 1035 security_timer.work); 1036 1037 l2cap_conn_del(conn->hcon, ETIMEDOUT); 1038 } 1039 1040 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) 1041 { 1042 struct l2cap_conn *conn = hcon->l2cap_data; 1043 struct hci_chan *hchan; 1044 1045 if (conn || status) 1046 return conn; 1047 1048 hchan = hci_chan_create(hcon); 1049 if (!hchan) 1050 return NULL; 1051 1052 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC); 1053 if (!conn) { 1054 hci_chan_del(hchan); 1055 return NULL; 1056 } 1057 1058 hcon->l2cap_data = conn; 1059 conn->hcon = hcon; 1060 conn->hchan = hchan; 1061 1062 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan); 1063 1064 if (hcon->hdev->le_mtu && hcon->type == LE_LINK) 1065 conn->mtu = hcon->hdev->le_mtu; 1066 else 1067 conn->mtu = hcon->hdev->acl_mtu; 1068 1069 conn->src = &hcon->hdev->bdaddr; 1070 conn->dst = &hcon->dst; 1071 1072 conn->feat_mask = 0; 1073 1074 spin_lock_init(&conn->lock); 1075 1076 INIT_LIST_HEAD(&conn->chan_l); 1077 1078 if (hcon->type == LE_LINK) 1079 INIT_DELAYED_WORK(&conn->security_timer, security_timeout); 1080 else 1081 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout); 1082 1083 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM; 1084 1085 return conn; 1086 } 1087 1088 /* ---- Socket interface ---- */ 1089 1090 /* Find socket with psm and source bdaddr. 1091 * Returns closest match. 1092 */ 1093 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src) 1094 { 1095 struct l2cap_chan *c, *c1 = NULL; 1096 1097 read_lock(&chan_list_lock); 1098 1099 list_for_each_entry(c, &chan_list, global_l) { 1100 struct sock *sk = c->sk; 1101 1102 if (state && c->state != state) 1103 continue; 1104 1105 if (c->psm == psm) { 1106 /* Exact match. */ 1107 if (!bacmp(&bt_sk(sk)->src, src)) { 1108 read_unlock(&chan_list_lock); 1109 return c; 1110 } 1111 1112 /* Closest match */ 1113 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) 1114 c1 = c; 1115 } 1116 } 1117 1118 read_unlock(&chan_list_lock); 1119 1120 return c1; 1121 } 1122 1123 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst) 1124 { 1125 struct sock *sk = chan->sk; 1126 bdaddr_t *src = &bt_sk(sk)->src; 1127 struct l2cap_conn *conn; 1128 struct hci_conn *hcon; 1129 struct hci_dev *hdev; 1130 __u8 auth_type; 1131 int err; 1132 1133 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), 1134 chan->psm); 1135 1136 hdev = hci_get_route(dst, src); 1137 if (!hdev) 1138 return -EHOSTUNREACH; 1139 1140 hci_dev_lock(hdev); 1141 1142 lock_sock(sk); 1143 1144 /* PSM must be odd and lsb of upper byte must be 0 */ 1145 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid && 1146 chan->chan_type != L2CAP_CHAN_RAW) { 1147 err = -EINVAL; 1148 goto done; 1149 } 1150 1151 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) { 1152 err = -EINVAL; 1153 goto done; 1154 } 1155 1156 switch (chan->mode) { 1157 case L2CAP_MODE_BASIC: 1158 break; 1159 case L2CAP_MODE_ERTM: 1160 case L2CAP_MODE_STREAMING: 1161 if (!disable_ertm) 1162 break; 1163 /* fall through */ 1164 default: 1165 err = -ENOTSUPP; 1166 goto done; 1167 } 1168 1169 switch (sk->sk_state) { 1170 case BT_CONNECT: 1171 case BT_CONNECT2: 1172 case BT_CONFIG: 1173 /* Already connecting */ 1174 err = 0; 1175 goto done; 1176 1177 case BT_CONNECTED: 1178 /* Already connected */ 1179 err = -EISCONN; 1180 goto done; 1181 1182 case BT_OPEN: 1183 case BT_BOUND: 1184 /* Can connect */ 1185 break; 1186 1187 default: 1188 err = -EBADFD; 1189 goto done; 1190 } 1191 1192 /* Set destination address and psm */ 1193 bacpy(&bt_sk(sk)->dst, dst); 1194 chan->psm = psm; 1195 chan->dcid = cid; 1196 1197 auth_type = l2cap_get_auth_type(chan); 1198 1199 if (chan->dcid == L2CAP_CID_LE_DATA) 1200 hcon = hci_connect(hdev, LE_LINK, dst, 1201 chan->sec_level, auth_type); 1202 else 1203 hcon = hci_connect(hdev, ACL_LINK, dst, 1204 chan->sec_level, auth_type); 1205 1206 if (IS_ERR(hcon)) { 1207 err = PTR_ERR(hcon); 1208 goto done; 1209 } 1210 1211 conn = l2cap_conn_add(hcon, 0); 1212 if (!conn) { 1213 hci_conn_put(hcon); 1214 err = -ENOMEM; 1215 goto done; 1216 } 1217 1218 /* Update source addr of the socket */ 1219 bacpy(src, conn->src); 1220 1221 l2cap_chan_add(conn, chan); 1222 1223 l2cap_state_change(chan, BT_CONNECT); 1224 __set_chan_timer(chan, sk->sk_sndtimeo); 1225 1226 if (hcon->state == BT_CONNECTED) { 1227 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { 1228 __clear_chan_timer(chan); 1229 if (l2cap_chan_check_security(chan)) 1230 l2cap_state_change(chan, BT_CONNECTED); 1231 } else 1232 l2cap_do_start(chan); 1233 } 1234 1235 err = 0; 1236 1237 done: 1238 hci_dev_unlock(hdev); 1239 hci_dev_put(hdev); 1240 return err; 1241 } 1242 1243 int __l2cap_wait_ack(struct sock *sk) 1244 { 1245 struct l2cap_chan *chan = l2cap_pi(sk)->chan; 1246 DECLARE_WAITQUEUE(wait, current); 1247 int err = 0; 1248 int timeo = HZ/5; 1249 1250 add_wait_queue(sk_sleep(sk), &wait); 1251 set_current_state(TASK_INTERRUPTIBLE); 1252 while (chan->unacked_frames > 0 && chan->conn) { 1253 if (!timeo) 1254 timeo = HZ/5; 1255 1256 if (signal_pending(current)) { 1257 err = sock_intr_errno(timeo); 1258 break; 1259 } 1260 1261 release_sock(sk); 1262 timeo = schedule_timeout(timeo); 1263 lock_sock(sk); 1264 set_current_state(TASK_INTERRUPTIBLE); 1265 1266 err = sock_error(sk); 1267 if (err) 1268 break; 1269 } 1270 set_current_state(TASK_RUNNING); 1271 remove_wait_queue(sk_sleep(sk), &wait); 1272 return err; 1273 } 1274 1275 static void l2cap_monitor_timeout(struct work_struct *work) 1276 { 1277 struct l2cap_chan *chan = container_of(work, struct l2cap_chan, 1278 monitor_timer.work); 1279 struct sock *sk = chan->sk; 1280 1281 BT_DBG("chan %p", chan); 1282 1283 lock_sock(sk); 1284 if (chan->retry_count >= chan->remote_max_tx) { 1285 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); 1286 release_sock(sk); 1287 return; 1288 } 1289 1290 chan->retry_count++; 1291 __set_monitor_timer(chan); 1292 1293 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); 1294 release_sock(sk); 1295 } 1296 1297 static void l2cap_retrans_timeout(struct work_struct *work) 1298 { 1299 struct l2cap_chan *chan = container_of(work, struct l2cap_chan, 1300 retrans_timer.work); 1301 struct sock *sk = chan->sk; 1302 1303 BT_DBG("chan %p", chan); 1304 1305 lock_sock(sk); 1306 chan->retry_count = 1; 1307 __set_monitor_timer(chan); 1308 1309 set_bit(CONN_WAIT_F, &chan->conn_state); 1310 1311 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); 1312 release_sock(sk); 1313 } 1314 1315 static void l2cap_drop_acked_frames(struct l2cap_chan *chan) 1316 { 1317 struct sk_buff *skb; 1318 1319 while ((skb = skb_peek(&chan->tx_q)) && 1320 chan->unacked_frames) { 1321 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq) 1322 break; 1323 1324 skb = skb_dequeue(&chan->tx_q); 1325 kfree_skb(skb); 1326 1327 chan->unacked_frames--; 1328 } 1329 1330 if (!chan->unacked_frames) 1331 __clear_retrans_timer(chan); 1332 } 1333 1334 static void l2cap_streaming_send(struct l2cap_chan *chan) 1335 { 1336 struct sk_buff *skb; 1337 u32 control; 1338 u16 fcs; 1339 1340 while ((skb = skb_dequeue(&chan->tx_q))) { 1341 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE); 1342 control |= __set_txseq(chan, chan->next_tx_seq); 1343 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE); 1344 1345 if (chan->fcs == L2CAP_FCS_CRC16) { 1346 fcs = crc16(0, (u8 *)skb->data, 1347 skb->len - L2CAP_FCS_SIZE); 1348 put_unaligned_le16(fcs, 1349 skb->data + skb->len - L2CAP_FCS_SIZE); 1350 } 1351 1352 l2cap_do_send(chan, skb); 1353 1354 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); 1355 } 1356 } 1357 1358 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq) 1359 { 1360 struct sk_buff *skb, *tx_skb; 1361 u16 fcs; 1362 u32 control; 1363 1364 skb = skb_peek(&chan->tx_q); 1365 if (!skb) 1366 return; 1367 1368 while (bt_cb(skb)->tx_seq != tx_seq) { 1369 if (skb_queue_is_last(&chan->tx_q, skb)) 1370 return; 1371 1372 skb = skb_queue_next(&chan->tx_q, skb); 1373 } 1374 1375 if (chan->remote_max_tx && 1376 bt_cb(skb)->retries == chan->remote_max_tx) { 1377 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); 1378 return; 1379 } 1380 1381 tx_skb = skb_clone(skb, GFP_ATOMIC); 1382 bt_cb(skb)->retries++; 1383 1384 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); 1385 control &= __get_sar_mask(chan); 1386 1387 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) 1388 control |= __set_ctrl_final(chan); 1389 1390 control |= __set_reqseq(chan, chan->buffer_seq); 1391 control |= __set_txseq(chan, tx_seq); 1392 1393 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE); 1394 1395 if (chan->fcs == L2CAP_FCS_CRC16) { 1396 fcs = crc16(0, (u8 *)tx_skb->data, 1397 tx_skb->len - L2CAP_FCS_SIZE); 1398 put_unaligned_le16(fcs, 1399 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE); 1400 } 1401 1402 l2cap_do_send(chan, tx_skb); 1403 } 1404 1405 static int l2cap_ertm_send(struct l2cap_chan *chan) 1406 { 1407 struct sk_buff *skb, *tx_skb; 1408 u16 fcs; 1409 u32 control; 1410 int nsent = 0; 1411 1412 if (chan->state != BT_CONNECTED) 1413 return -ENOTCONN; 1414 1415 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) { 1416 1417 if (chan->remote_max_tx && 1418 bt_cb(skb)->retries == chan->remote_max_tx) { 1419 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); 1420 break; 1421 } 1422 1423 tx_skb = skb_clone(skb, GFP_ATOMIC); 1424 1425 bt_cb(skb)->retries++; 1426 1427 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); 1428 control &= __get_sar_mask(chan); 1429 1430 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) 1431 control |= __set_ctrl_final(chan); 1432 1433 control |= __set_reqseq(chan, chan->buffer_seq); 1434 control |= __set_txseq(chan, chan->next_tx_seq); 1435 1436 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE); 1437 1438 if (chan->fcs == L2CAP_FCS_CRC16) { 1439 fcs = crc16(0, (u8 *)skb->data, 1440 tx_skb->len - L2CAP_FCS_SIZE); 1441 put_unaligned_le16(fcs, skb->data + 1442 tx_skb->len - L2CAP_FCS_SIZE); 1443 } 1444 1445 l2cap_do_send(chan, tx_skb); 1446 1447 __set_retrans_timer(chan); 1448 1449 bt_cb(skb)->tx_seq = chan->next_tx_seq; 1450 1451 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); 1452 1453 if (bt_cb(skb)->retries == 1) 1454 chan->unacked_frames++; 1455 1456 chan->frames_sent++; 1457 1458 if (skb_queue_is_last(&chan->tx_q, skb)) 1459 chan->tx_send_head = NULL; 1460 else 1461 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb); 1462 1463 nsent++; 1464 } 1465 1466 return nsent; 1467 } 1468 1469 static int l2cap_retransmit_frames(struct l2cap_chan *chan) 1470 { 1471 int ret; 1472 1473 if (!skb_queue_empty(&chan->tx_q)) 1474 chan->tx_send_head = chan->tx_q.next; 1475 1476 chan->next_tx_seq = chan->expected_ack_seq; 1477 ret = l2cap_ertm_send(chan); 1478 return ret; 1479 } 1480 1481 static void l2cap_send_ack(struct l2cap_chan *chan) 1482 { 1483 u32 control = 0; 1484 1485 control |= __set_reqseq(chan, chan->buffer_seq); 1486 1487 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 1488 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); 1489 set_bit(CONN_RNR_SENT, &chan->conn_state); 1490 l2cap_send_sframe(chan, control); 1491 return; 1492 } 1493 1494 if (l2cap_ertm_send(chan) > 0) 1495 return; 1496 1497 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); 1498 l2cap_send_sframe(chan, control); 1499 } 1500 1501 static void l2cap_send_srejtail(struct l2cap_chan *chan) 1502 { 1503 struct srej_list *tail; 1504 u32 control; 1505 1506 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); 1507 control |= __set_ctrl_final(chan); 1508 1509 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list); 1510 control |= __set_reqseq(chan, tail->tx_seq); 1511 1512 l2cap_send_sframe(chan, control); 1513 } 1514 1515 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb) 1516 { 1517 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn; 1518 struct sk_buff **frag; 1519 int err, sent = 0; 1520 1521 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) 1522 return -EFAULT; 1523 1524 sent += count; 1525 len -= count; 1526 1527 /* Continuation fragments (no L2CAP header) */ 1528 frag = &skb_shinfo(skb)->frag_list; 1529 while (len) { 1530 count = min_t(unsigned int, conn->mtu, len); 1531 1532 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err); 1533 if (!*frag) 1534 return err; 1535 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) 1536 return -EFAULT; 1537 1538 (*frag)->priority = skb->priority; 1539 1540 sent += count; 1541 len -= count; 1542 1543 frag = &(*frag)->next; 1544 } 1545 1546 return sent; 1547 } 1548 1549 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, 1550 struct msghdr *msg, size_t len, 1551 u32 priority) 1552 { 1553 struct sock *sk = chan->sk; 1554 struct l2cap_conn *conn = chan->conn; 1555 struct sk_buff *skb; 1556 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE; 1557 struct l2cap_hdr *lh; 1558 1559 BT_DBG("sk %p len %d priority %u", sk, (int)len, priority); 1560 1561 count = min_t(unsigned int, (conn->mtu - hlen), len); 1562 skb = bt_skb_send_alloc(sk, count + hlen, 1563 msg->msg_flags & MSG_DONTWAIT, &err); 1564 if (!skb) 1565 return ERR_PTR(err); 1566 1567 skb->priority = priority; 1568 1569 /* Create L2CAP header */ 1570 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 1571 lh->cid = cpu_to_le16(chan->dcid); 1572 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); 1573 put_unaligned_le16(chan->psm, skb_put(skb, 2)); 1574 1575 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); 1576 if (unlikely(err < 0)) { 1577 kfree_skb(skb); 1578 return ERR_PTR(err); 1579 } 1580 return skb; 1581 } 1582 1583 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, 1584 struct msghdr *msg, size_t len, 1585 u32 priority) 1586 { 1587 struct sock *sk = chan->sk; 1588 struct l2cap_conn *conn = chan->conn; 1589 struct sk_buff *skb; 1590 int err, count, hlen = L2CAP_HDR_SIZE; 1591 struct l2cap_hdr *lh; 1592 1593 BT_DBG("sk %p len %d", sk, (int)len); 1594 1595 count = min_t(unsigned int, (conn->mtu - hlen), len); 1596 skb = bt_skb_send_alloc(sk, count + hlen, 1597 msg->msg_flags & MSG_DONTWAIT, &err); 1598 if (!skb) 1599 return ERR_PTR(err); 1600 1601 skb->priority = priority; 1602 1603 /* Create L2CAP header */ 1604 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 1605 lh->cid = cpu_to_le16(chan->dcid); 1606 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); 1607 1608 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); 1609 if (unlikely(err < 0)) { 1610 kfree_skb(skb); 1611 return ERR_PTR(err); 1612 } 1613 return skb; 1614 } 1615 1616 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, 1617 struct msghdr *msg, size_t len, 1618 u32 control, u16 sdulen) 1619 { 1620 struct sock *sk = chan->sk; 1621 struct l2cap_conn *conn = chan->conn; 1622 struct sk_buff *skb; 1623 int err, count, hlen; 1624 struct l2cap_hdr *lh; 1625 1626 BT_DBG("sk %p len %d", sk, (int)len); 1627 1628 if (!conn) 1629 return ERR_PTR(-ENOTCONN); 1630 1631 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) 1632 hlen = L2CAP_EXT_HDR_SIZE; 1633 else 1634 hlen = L2CAP_ENH_HDR_SIZE; 1635 1636 if (sdulen) 1637 hlen += L2CAP_SDULEN_SIZE; 1638 1639 if (chan->fcs == L2CAP_FCS_CRC16) 1640 hlen += L2CAP_FCS_SIZE; 1641 1642 count = min_t(unsigned int, (conn->mtu - hlen), len); 1643 skb = bt_skb_send_alloc(sk, count + hlen, 1644 msg->msg_flags & MSG_DONTWAIT, &err); 1645 if (!skb) 1646 return ERR_PTR(err); 1647 1648 /* Create L2CAP header */ 1649 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 1650 lh->cid = cpu_to_le16(chan->dcid); 1651 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); 1652 1653 __put_control(chan, control, skb_put(skb, __ctrl_size(chan))); 1654 1655 if (sdulen) 1656 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE)); 1657 1658 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); 1659 if (unlikely(err < 0)) { 1660 kfree_skb(skb); 1661 return ERR_PTR(err); 1662 } 1663 1664 if (chan->fcs == L2CAP_FCS_CRC16) 1665 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE)); 1666 1667 bt_cb(skb)->retries = 0; 1668 return skb; 1669 } 1670 1671 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len) 1672 { 1673 struct sk_buff *skb; 1674 struct sk_buff_head sar_queue; 1675 u32 control; 1676 size_t size = 0; 1677 1678 skb_queue_head_init(&sar_queue); 1679 control = __set_ctrl_sar(chan, L2CAP_SAR_START); 1680 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len); 1681 if (IS_ERR(skb)) 1682 return PTR_ERR(skb); 1683 1684 __skb_queue_tail(&sar_queue, skb); 1685 len -= chan->remote_mps; 1686 size += chan->remote_mps; 1687 1688 while (len > 0) { 1689 size_t buflen; 1690 1691 if (len > chan->remote_mps) { 1692 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE); 1693 buflen = chan->remote_mps; 1694 } else { 1695 control = __set_ctrl_sar(chan, L2CAP_SAR_END); 1696 buflen = len; 1697 } 1698 1699 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0); 1700 if (IS_ERR(skb)) { 1701 skb_queue_purge(&sar_queue); 1702 return PTR_ERR(skb); 1703 } 1704 1705 __skb_queue_tail(&sar_queue, skb); 1706 len -= buflen; 1707 size += buflen; 1708 } 1709 skb_queue_splice_tail(&sar_queue, &chan->tx_q); 1710 if (chan->tx_send_head == NULL) 1711 chan->tx_send_head = sar_queue.next; 1712 1713 return size; 1714 } 1715 1716 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, 1717 u32 priority) 1718 { 1719 struct sk_buff *skb; 1720 u32 control; 1721 int err; 1722 1723 /* Connectionless channel */ 1724 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) { 1725 skb = l2cap_create_connless_pdu(chan, msg, len, priority); 1726 if (IS_ERR(skb)) 1727 return PTR_ERR(skb); 1728 1729 l2cap_do_send(chan, skb); 1730 return len; 1731 } 1732 1733 switch (chan->mode) { 1734 case L2CAP_MODE_BASIC: 1735 /* Check outgoing MTU */ 1736 if (len > chan->omtu) 1737 return -EMSGSIZE; 1738 1739 /* Create a basic PDU */ 1740 skb = l2cap_create_basic_pdu(chan, msg, len, priority); 1741 if (IS_ERR(skb)) 1742 return PTR_ERR(skb); 1743 1744 l2cap_do_send(chan, skb); 1745 err = len; 1746 break; 1747 1748 case L2CAP_MODE_ERTM: 1749 case L2CAP_MODE_STREAMING: 1750 /* Entire SDU fits into one PDU */ 1751 if (len <= chan->remote_mps) { 1752 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED); 1753 skb = l2cap_create_iframe_pdu(chan, msg, len, control, 1754 0); 1755 if (IS_ERR(skb)) 1756 return PTR_ERR(skb); 1757 1758 __skb_queue_tail(&chan->tx_q, skb); 1759 1760 if (chan->tx_send_head == NULL) 1761 chan->tx_send_head = skb; 1762 1763 } else { 1764 /* Segment SDU into multiples PDUs */ 1765 err = l2cap_sar_segment_sdu(chan, msg, len); 1766 if (err < 0) 1767 return err; 1768 } 1769 1770 if (chan->mode == L2CAP_MODE_STREAMING) { 1771 l2cap_streaming_send(chan); 1772 err = len; 1773 break; 1774 } 1775 1776 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && 1777 test_bit(CONN_WAIT_F, &chan->conn_state)) { 1778 err = len; 1779 break; 1780 } 1781 1782 err = l2cap_ertm_send(chan); 1783 if (err >= 0) 1784 err = len; 1785 1786 break; 1787 1788 default: 1789 BT_DBG("bad state %1.1x", chan->mode); 1790 err = -EBADFD; 1791 } 1792 1793 return err; 1794 } 1795 1796 /* Copy frame to all raw sockets on that connection */ 1797 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) 1798 { 1799 struct sk_buff *nskb; 1800 struct l2cap_chan *chan; 1801 1802 BT_DBG("conn %p", conn); 1803 1804 rcu_read_lock(); 1805 1806 list_for_each_entry_rcu(chan, &conn->chan_l, list) { 1807 struct sock *sk = chan->sk; 1808 if (chan->chan_type != L2CAP_CHAN_RAW) 1809 continue; 1810 1811 /* Don't send frame to the socket it came from */ 1812 if (skb->sk == sk) 1813 continue; 1814 nskb = skb_clone(skb, GFP_ATOMIC); 1815 if (!nskb) 1816 continue; 1817 1818 if (chan->ops->recv(chan->data, nskb)) 1819 kfree_skb(nskb); 1820 } 1821 1822 rcu_read_unlock(); 1823 } 1824 1825 /* ---- L2CAP signalling commands ---- */ 1826 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, 1827 u8 code, u8 ident, u16 dlen, void *data) 1828 { 1829 struct sk_buff *skb, **frag; 1830 struct l2cap_cmd_hdr *cmd; 1831 struct l2cap_hdr *lh; 1832 int len, count; 1833 1834 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", 1835 conn, code, ident, dlen); 1836 1837 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen; 1838 count = min_t(unsigned int, conn->mtu, len); 1839 1840 skb = bt_skb_alloc(count, GFP_ATOMIC); 1841 if (!skb) 1842 return NULL; 1843 1844 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 1845 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen); 1846 1847 if (conn->hcon->type == LE_LINK) 1848 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING); 1849 else 1850 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING); 1851 1852 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE); 1853 cmd->code = code; 1854 cmd->ident = ident; 1855 cmd->len = cpu_to_le16(dlen); 1856 1857 if (dlen) { 1858 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE; 1859 memcpy(skb_put(skb, count), data, count); 1860 data += count; 1861 } 1862 1863 len -= skb->len; 1864 1865 /* Continuation fragments (no L2CAP header) */ 1866 frag = &skb_shinfo(skb)->frag_list; 1867 while (len) { 1868 count = min_t(unsigned int, conn->mtu, len); 1869 1870 *frag = bt_skb_alloc(count, GFP_ATOMIC); 1871 if (!*frag) 1872 goto fail; 1873 1874 memcpy(skb_put(*frag, count), data, count); 1875 1876 len -= count; 1877 data += count; 1878 1879 frag = &(*frag)->next; 1880 } 1881 1882 return skb; 1883 1884 fail: 1885 kfree_skb(skb); 1886 return NULL; 1887 } 1888 1889 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val) 1890 { 1891 struct l2cap_conf_opt *opt = *ptr; 1892 int len; 1893 1894 len = L2CAP_CONF_OPT_SIZE + opt->len; 1895 *ptr += len; 1896 1897 *type = opt->type; 1898 *olen = opt->len; 1899 1900 switch (opt->len) { 1901 case 1: 1902 *val = *((u8 *) opt->val); 1903 break; 1904 1905 case 2: 1906 *val = get_unaligned_le16(opt->val); 1907 break; 1908 1909 case 4: 1910 *val = get_unaligned_le32(opt->val); 1911 break; 1912 1913 default: 1914 *val = (unsigned long) opt->val; 1915 break; 1916 } 1917 1918 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val); 1919 return len; 1920 } 1921 1922 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val) 1923 { 1924 struct l2cap_conf_opt *opt = *ptr; 1925 1926 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val); 1927 1928 opt->type = type; 1929 opt->len = len; 1930 1931 switch (len) { 1932 case 1: 1933 *((u8 *) opt->val) = val; 1934 break; 1935 1936 case 2: 1937 put_unaligned_le16(val, opt->val); 1938 break; 1939 1940 case 4: 1941 put_unaligned_le32(val, opt->val); 1942 break; 1943 1944 default: 1945 memcpy(opt->val, (void *) val, len); 1946 break; 1947 } 1948 1949 *ptr += L2CAP_CONF_OPT_SIZE + len; 1950 } 1951 1952 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan) 1953 { 1954 struct l2cap_conf_efs efs; 1955 1956 switch (chan->mode) { 1957 case L2CAP_MODE_ERTM: 1958 efs.id = chan->local_id; 1959 efs.stype = chan->local_stype; 1960 efs.msdu = cpu_to_le16(chan->local_msdu); 1961 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); 1962 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT); 1963 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO); 1964 break; 1965 1966 case L2CAP_MODE_STREAMING: 1967 efs.id = 1; 1968 efs.stype = L2CAP_SERV_BESTEFFORT; 1969 efs.msdu = cpu_to_le16(chan->local_msdu); 1970 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); 1971 efs.acc_lat = 0; 1972 efs.flush_to = 0; 1973 break; 1974 1975 default: 1976 return; 1977 } 1978 1979 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs), 1980 (unsigned long) &efs); 1981 } 1982 1983 static void l2cap_ack_timeout(struct work_struct *work) 1984 { 1985 struct l2cap_chan *chan = container_of(work, struct l2cap_chan, 1986 ack_timer.work); 1987 1988 BT_DBG("chan %p", chan); 1989 1990 lock_sock(chan->sk); 1991 l2cap_send_ack(chan); 1992 release_sock(chan->sk); 1993 } 1994 1995 static inline void l2cap_ertm_init(struct l2cap_chan *chan) 1996 { 1997 chan->expected_ack_seq = 0; 1998 chan->unacked_frames = 0; 1999 chan->buffer_seq = 0; 2000 chan->num_acked = 0; 2001 chan->frames_sent = 0; 2002 2003 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout); 2004 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout); 2005 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout); 2006 2007 skb_queue_head_init(&chan->srej_q); 2008 2009 INIT_LIST_HEAD(&chan->srej_l); 2010 } 2011 2012 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) 2013 { 2014 switch (mode) { 2015 case L2CAP_MODE_STREAMING: 2016 case L2CAP_MODE_ERTM: 2017 if (l2cap_mode_supported(mode, remote_feat_mask)) 2018 return mode; 2019 /* fall through */ 2020 default: 2021 return L2CAP_MODE_BASIC; 2022 } 2023 } 2024 2025 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan) 2026 { 2027 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW; 2028 } 2029 2030 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan) 2031 { 2032 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW; 2033 } 2034 2035 static inline void l2cap_txwin_setup(struct l2cap_chan *chan) 2036 { 2037 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW && 2038 __l2cap_ews_supported(chan)) { 2039 /* use extended control field */ 2040 set_bit(FLAG_EXT_CTRL, &chan->flags); 2041 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW; 2042 } else { 2043 chan->tx_win = min_t(u16, chan->tx_win, 2044 L2CAP_DEFAULT_TX_WINDOW); 2045 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW; 2046 } 2047 } 2048 2049 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data) 2050 { 2051 struct l2cap_conf_req *req = data; 2052 struct l2cap_conf_rfc rfc = { .mode = chan->mode }; 2053 void *ptr = req->data; 2054 u16 size; 2055 2056 BT_DBG("chan %p", chan); 2057 2058 if (chan->num_conf_req || chan->num_conf_rsp) 2059 goto done; 2060 2061 switch (chan->mode) { 2062 case L2CAP_MODE_STREAMING: 2063 case L2CAP_MODE_ERTM: 2064 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) 2065 break; 2066 2067 if (__l2cap_efs_supported(chan)) 2068 set_bit(FLAG_EFS_ENABLE, &chan->flags); 2069 2070 /* fall through */ 2071 default: 2072 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask); 2073 break; 2074 } 2075 2076 done: 2077 if (chan->imtu != L2CAP_DEFAULT_MTU) 2078 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu); 2079 2080 switch (chan->mode) { 2081 case L2CAP_MODE_BASIC: 2082 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) && 2083 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING)) 2084 break; 2085 2086 rfc.mode = L2CAP_MODE_BASIC; 2087 rfc.txwin_size = 0; 2088 rfc.max_transmit = 0; 2089 rfc.retrans_timeout = 0; 2090 rfc.monitor_timeout = 0; 2091 rfc.max_pdu_size = 0; 2092 2093 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 2094 (unsigned long) &rfc); 2095 break; 2096 2097 case L2CAP_MODE_ERTM: 2098 rfc.mode = L2CAP_MODE_ERTM; 2099 rfc.max_transmit = chan->max_tx; 2100 rfc.retrans_timeout = 0; 2101 rfc.monitor_timeout = 0; 2102 2103 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu - 2104 L2CAP_EXT_HDR_SIZE - 2105 L2CAP_SDULEN_SIZE - 2106 L2CAP_FCS_SIZE); 2107 rfc.max_pdu_size = cpu_to_le16(size); 2108 2109 l2cap_txwin_setup(chan); 2110 2111 rfc.txwin_size = min_t(u16, chan->tx_win, 2112 L2CAP_DEFAULT_TX_WINDOW); 2113 2114 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 2115 (unsigned long) &rfc); 2116 2117 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) 2118 l2cap_add_opt_efs(&ptr, chan); 2119 2120 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS)) 2121 break; 2122 2123 if (chan->fcs == L2CAP_FCS_NONE || 2124 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) { 2125 chan->fcs = L2CAP_FCS_NONE; 2126 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs); 2127 } 2128 2129 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) 2130 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2, 2131 chan->tx_win); 2132 break; 2133 2134 case L2CAP_MODE_STREAMING: 2135 rfc.mode = L2CAP_MODE_STREAMING; 2136 rfc.txwin_size = 0; 2137 rfc.max_transmit = 0; 2138 rfc.retrans_timeout = 0; 2139 rfc.monitor_timeout = 0; 2140 2141 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu - 2142 L2CAP_EXT_HDR_SIZE - 2143 L2CAP_SDULEN_SIZE - 2144 L2CAP_FCS_SIZE); 2145 rfc.max_pdu_size = cpu_to_le16(size); 2146 2147 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 2148 (unsigned long) &rfc); 2149 2150 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) 2151 l2cap_add_opt_efs(&ptr, chan); 2152 2153 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS)) 2154 break; 2155 2156 if (chan->fcs == L2CAP_FCS_NONE || 2157 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) { 2158 chan->fcs = L2CAP_FCS_NONE; 2159 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs); 2160 } 2161 break; 2162 } 2163 2164 req->dcid = cpu_to_le16(chan->dcid); 2165 req->flags = cpu_to_le16(0); 2166 2167 return ptr - data; 2168 } 2169 2170 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data) 2171 { 2172 struct l2cap_conf_rsp *rsp = data; 2173 void *ptr = rsp->data; 2174 void *req = chan->conf_req; 2175 int len = chan->conf_len; 2176 int type, hint, olen; 2177 unsigned long val; 2178 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; 2179 struct l2cap_conf_efs efs; 2180 u8 remote_efs = 0; 2181 u16 mtu = L2CAP_DEFAULT_MTU; 2182 u16 result = L2CAP_CONF_SUCCESS; 2183 u16 size; 2184 2185 BT_DBG("chan %p", chan); 2186 2187 while (len >= L2CAP_CONF_OPT_SIZE) { 2188 len -= l2cap_get_conf_opt(&req, &type, &olen, &val); 2189 2190 hint = type & L2CAP_CONF_HINT; 2191 type &= L2CAP_CONF_MASK; 2192 2193 switch (type) { 2194 case L2CAP_CONF_MTU: 2195 mtu = val; 2196 break; 2197 2198 case L2CAP_CONF_FLUSH_TO: 2199 chan->flush_to = val; 2200 break; 2201 2202 case L2CAP_CONF_QOS: 2203 break; 2204 2205 case L2CAP_CONF_RFC: 2206 if (olen == sizeof(rfc)) 2207 memcpy(&rfc, (void *) val, olen); 2208 break; 2209 2210 case L2CAP_CONF_FCS: 2211 if (val == L2CAP_FCS_NONE) 2212 set_bit(CONF_NO_FCS_RECV, &chan->conf_state); 2213 break; 2214 2215 case L2CAP_CONF_EFS: 2216 remote_efs = 1; 2217 if (olen == sizeof(efs)) 2218 memcpy(&efs, (void *) val, olen); 2219 break; 2220 2221 case L2CAP_CONF_EWS: 2222 if (!enable_hs) 2223 return -ECONNREFUSED; 2224 2225 set_bit(FLAG_EXT_CTRL, &chan->flags); 2226 set_bit(CONF_EWS_RECV, &chan->conf_state); 2227 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW; 2228 chan->remote_tx_win = val; 2229 break; 2230 2231 default: 2232 if (hint) 2233 break; 2234 2235 result = L2CAP_CONF_UNKNOWN; 2236 *((u8 *) ptr++) = type; 2237 break; 2238 } 2239 } 2240 2241 if (chan->num_conf_rsp || chan->num_conf_req > 1) 2242 goto done; 2243 2244 switch (chan->mode) { 2245 case L2CAP_MODE_STREAMING: 2246 case L2CAP_MODE_ERTM: 2247 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) { 2248 chan->mode = l2cap_select_mode(rfc.mode, 2249 chan->conn->feat_mask); 2250 break; 2251 } 2252 2253 if (remote_efs) { 2254 if (__l2cap_efs_supported(chan)) 2255 set_bit(FLAG_EFS_ENABLE, &chan->flags); 2256 else 2257 return -ECONNREFUSED; 2258 } 2259 2260 if (chan->mode != rfc.mode) 2261 return -ECONNREFUSED; 2262 2263 break; 2264 } 2265 2266 done: 2267 if (chan->mode != rfc.mode) { 2268 result = L2CAP_CONF_UNACCEPT; 2269 rfc.mode = chan->mode; 2270 2271 if (chan->num_conf_rsp == 1) 2272 return -ECONNREFUSED; 2273 2274 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 2275 sizeof(rfc), (unsigned long) &rfc); 2276 } 2277 2278 if (result == L2CAP_CONF_SUCCESS) { 2279 /* Configure output options and let the other side know 2280 * which ones we don't like. */ 2281 2282 if (mtu < L2CAP_DEFAULT_MIN_MTU) 2283 result = L2CAP_CONF_UNACCEPT; 2284 else { 2285 chan->omtu = mtu; 2286 set_bit(CONF_MTU_DONE, &chan->conf_state); 2287 } 2288 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu); 2289 2290 if (remote_efs) { 2291 if (chan->local_stype != L2CAP_SERV_NOTRAFIC && 2292 efs.stype != L2CAP_SERV_NOTRAFIC && 2293 efs.stype != chan->local_stype) { 2294 2295 result = L2CAP_CONF_UNACCEPT; 2296 2297 if (chan->num_conf_req >= 1) 2298 return -ECONNREFUSED; 2299 2300 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, 2301 sizeof(efs), 2302 (unsigned long) &efs); 2303 } else { 2304 /* Send PENDING Conf Rsp */ 2305 result = L2CAP_CONF_PENDING; 2306 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state); 2307 } 2308 } 2309 2310 switch (rfc.mode) { 2311 case L2CAP_MODE_BASIC: 2312 chan->fcs = L2CAP_FCS_NONE; 2313 set_bit(CONF_MODE_DONE, &chan->conf_state); 2314 break; 2315 2316 case L2CAP_MODE_ERTM: 2317 if (!test_bit(CONF_EWS_RECV, &chan->conf_state)) 2318 chan->remote_tx_win = rfc.txwin_size; 2319 else 2320 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW; 2321 2322 chan->remote_max_tx = rfc.max_transmit; 2323 2324 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size), 2325 chan->conn->mtu - 2326 L2CAP_EXT_HDR_SIZE - 2327 L2CAP_SDULEN_SIZE - 2328 L2CAP_FCS_SIZE); 2329 rfc.max_pdu_size = cpu_to_le16(size); 2330 chan->remote_mps = size; 2331 2332 rfc.retrans_timeout = 2333 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO); 2334 rfc.monitor_timeout = 2335 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO); 2336 2337 set_bit(CONF_MODE_DONE, &chan->conf_state); 2338 2339 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 2340 sizeof(rfc), (unsigned long) &rfc); 2341 2342 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) { 2343 chan->remote_id = efs.id; 2344 chan->remote_stype = efs.stype; 2345 chan->remote_msdu = le16_to_cpu(efs.msdu); 2346 chan->remote_flush_to = 2347 le32_to_cpu(efs.flush_to); 2348 chan->remote_acc_lat = 2349 le32_to_cpu(efs.acc_lat); 2350 chan->remote_sdu_itime = 2351 le32_to_cpu(efs.sdu_itime); 2352 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, 2353 sizeof(efs), (unsigned long) &efs); 2354 } 2355 break; 2356 2357 case L2CAP_MODE_STREAMING: 2358 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size), 2359 chan->conn->mtu - 2360 L2CAP_EXT_HDR_SIZE - 2361 L2CAP_SDULEN_SIZE - 2362 L2CAP_FCS_SIZE); 2363 rfc.max_pdu_size = cpu_to_le16(size); 2364 chan->remote_mps = size; 2365 2366 set_bit(CONF_MODE_DONE, &chan->conf_state); 2367 2368 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 2369 sizeof(rfc), (unsigned long) &rfc); 2370 2371 break; 2372 2373 default: 2374 result = L2CAP_CONF_UNACCEPT; 2375 2376 memset(&rfc, 0, sizeof(rfc)); 2377 rfc.mode = chan->mode; 2378 } 2379 2380 if (result == L2CAP_CONF_SUCCESS) 2381 set_bit(CONF_OUTPUT_DONE, &chan->conf_state); 2382 } 2383 rsp->scid = cpu_to_le16(chan->dcid); 2384 rsp->result = cpu_to_le16(result); 2385 rsp->flags = cpu_to_le16(0x0000); 2386 2387 return ptr - data; 2388 } 2389 2390 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result) 2391 { 2392 struct l2cap_conf_req *req = data; 2393 void *ptr = req->data; 2394 int type, olen; 2395 unsigned long val; 2396 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; 2397 struct l2cap_conf_efs efs; 2398 2399 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data); 2400 2401 while (len >= L2CAP_CONF_OPT_SIZE) { 2402 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val); 2403 2404 switch (type) { 2405 case L2CAP_CONF_MTU: 2406 if (val < L2CAP_DEFAULT_MIN_MTU) { 2407 *result = L2CAP_CONF_UNACCEPT; 2408 chan->imtu = L2CAP_DEFAULT_MIN_MTU; 2409 } else 2410 chan->imtu = val; 2411 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu); 2412 break; 2413 2414 case L2CAP_CONF_FLUSH_TO: 2415 chan->flush_to = val; 2416 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2417 2, chan->flush_to); 2418 break; 2419 2420 case L2CAP_CONF_RFC: 2421 if (olen == sizeof(rfc)) 2422 memcpy(&rfc, (void *)val, olen); 2423 2424 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) && 2425 rfc.mode != chan->mode) 2426 return -ECONNREFUSED; 2427 2428 chan->fcs = 0; 2429 2430 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 2431 sizeof(rfc), (unsigned long) &rfc); 2432 break; 2433 2434 case L2CAP_CONF_EWS: 2435 chan->tx_win = min_t(u16, val, 2436 L2CAP_DEFAULT_EXT_WINDOW); 2437 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2, 2438 chan->tx_win); 2439 break; 2440 2441 case L2CAP_CONF_EFS: 2442 if (olen == sizeof(efs)) 2443 memcpy(&efs, (void *)val, olen); 2444 2445 if (chan->local_stype != L2CAP_SERV_NOTRAFIC && 2446 efs.stype != L2CAP_SERV_NOTRAFIC && 2447 efs.stype != chan->local_stype) 2448 return -ECONNREFUSED; 2449 2450 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, 2451 sizeof(efs), (unsigned long) &efs); 2452 break; 2453 } 2454 } 2455 2456 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode) 2457 return -ECONNREFUSED; 2458 2459 chan->mode = rfc.mode; 2460 2461 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) { 2462 switch (rfc.mode) { 2463 case L2CAP_MODE_ERTM: 2464 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout); 2465 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout); 2466 chan->mps = le16_to_cpu(rfc.max_pdu_size); 2467 2468 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) { 2469 chan->local_msdu = le16_to_cpu(efs.msdu); 2470 chan->local_sdu_itime = 2471 le32_to_cpu(efs.sdu_itime); 2472 chan->local_acc_lat = le32_to_cpu(efs.acc_lat); 2473 chan->local_flush_to = 2474 le32_to_cpu(efs.flush_to); 2475 } 2476 break; 2477 2478 case L2CAP_MODE_STREAMING: 2479 chan->mps = le16_to_cpu(rfc.max_pdu_size); 2480 } 2481 } 2482 2483 req->dcid = cpu_to_le16(chan->dcid); 2484 req->flags = cpu_to_le16(0x0000); 2485 2486 return ptr - data; 2487 } 2488 2489 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags) 2490 { 2491 struct l2cap_conf_rsp *rsp = data; 2492 void *ptr = rsp->data; 2493 2494 BT_DBG("chan %p", chan); 2495 2496 rsp->scid = cpu_to_le16(chan->dcid); 2497 rsp->result = cpu_to_le16(result); 2498 rsp->flags = cpu_to_le16(flags); 2499 2500 return ptr - data; 2501 } 2502 2503 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan) 2504 { 2505 struct l2cap_conn_rsp rsp; 2506 struct l2cap_conn *conn = chan->conn; 2507 u8 buf[128]; 2508 2509 rsp.scid = cpu_to_le16(chan->dcid); 2510 rsp.dcid = cpu_to_le16(chan->scid); 2511 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); 2512 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); 2513 l2cap_send_cmd(conn, chan->ident, 2514 L2CAP_CONN_RSP, sizeof(rsp), &rsp); 2515 2516 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) 2517 return; 2518 2519 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 2520 l2cap_build_conf_req(chan, buf), buf); 2521 chan->num_conf_req++; 2522 } 2523 2524 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len) 2525 { 2526 int type, olen; 2527 unsigned long val; 2528 struct l2cap_conf_rfc rfc; 2529 2530 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len); 2531 2532 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING)) 2533 return; 2534 2535 while (len >= L2CAP_CONF_OPT_SIZE) { 2536 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val); 2537 2538 switch (type) { 2539 case L2CAP_CONF_RFC: 2540 if (olen == sizeof(rfc)) 2541 memcpy(&rfc, (void *)val, olen); 2542 goto done; 2543 } 2544 } 2545 2546 /* Use sane default values in case a misbehaving remote device 2547 * did not send an RFC option. 2548 */ 2549 rfc.mode = chan->mode; 2550 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO); 2551 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO); 2552 rfc.max_pdu_size = cpu_to_le16(chan->imtu); 2553 2554 BT_ERR("Expected RFC option was not found, using defaults"); 2555 2556 done: 2557 switch (rfc.mode) { 2558 case L2CAP_MODE_ERTM: 2559 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout); 2560 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout); 2561 chan->mps = le16_to_cpu(rfc.max_pdu_size); 2562 break; 2563 case L2CAP_MODE_STREAMING: 2564 chan->mps = le16_to_cpu(rfc.max_pdu_size); 2565 } 2566 } 2567 2568 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) 2569 { 2570 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data; 2571 2572 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD) 2573 return 0; 2574 2575 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) && 2576 cmd->ident == conn->info_ident) { 2577 cancel_delayed_work(&conn->info_timer); 2578 2579 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; 2580 conn->info_ident = 0; 2581 2582 l2cap_conn_start(conn); 2583 } 2584 2585 return 0; 2586 } 2587 2588 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) 2589 { 2590 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data; 2591 struct l2cap_conn_rsp rsp; 2592 struct l2cap_chan *chan = NULL, *pchan; 2593 struct sock *parent, *sk = NULL; 2594 int result, status = L2CAP_CS_NO_INFO; 2595 2596 u16 dcid = 0, scid = __le16_to_cpu(req->scid); 2597 __le16 psm = req->psm; 2598 2599 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid); 2600 2601 /* Check if we have socket listening on psm */ 2602 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src); 2603 if (!pchan) { 2604 result = L2CAP_CR_BAD_PSM; 2605 goto sendresp; 2606 } 2607 2608 parent = pchan->sk; 2609 2610 lock_sock(parent); 2611 2612 /* Check if the ACL is secure enough (if not SDP) */ 2613 if (psm != cpu_to_le16(0x0001) && 2614 !hci_conn_check_link_mode(conn->hcon)) { 2615 conn->disc_reason = HCI_ERROR_AUTH_FAILURE; 2616 result = L2CAP_CR_SEC_BLOCK; 2617 goto response; 2618 } 2619 2620 result = L2CAP_CR_NO_MEM; 2621 2622 /* Check for backlog size */ 2623 if (sk_acceptq_is_full(parent)) { 2624 BT_DBG("backlog full %d", parent->sk_ack_backlog); 2625 goto response; 2626 } 2627 2628 chan = pchan->ops->new_connection(pchan->data); 2629 if (!chan) 2630 goto response; 2631 2632 sk = chan->sk; 2633 2634 /* Check if we already have channel with that dcid */ 2635 if (__l2cap_get_chan_by_dcid(conn, scid)) { 2636 sock_set_flag(sk, SOCK_ZAPPED); 2637 chan->ops->close(chan->data); 2638 goto response; 2639 } 2640 2641 hci_conn_hold(conn->hcon); 2642 2643 bacpy(&bt_sk(sk)->src, conn->src); 2644 bacpy(&bt_sk(sk)->dst, conn->dst); 2645 chan->psm = psm; 2646 chan->dcid = scid; 2647 2648 bt_accept_enqueue(parent, sk); 2649 2650 l2cap_chan_add(conn, chan); 2651 2652 dcid = chan->scid; 2653 2654 __set_chan_timer(chan, sk->sk_sndtimeo); 2655 2656 chan->ident = cmd->ident; 2657 2658 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) { 2659 if (l2cap_chan_check_security(chan)) { 2660 if (bt_sk(sk)->defer_setup) { 2661 l2cap_state_change(chan, BT_CONNECT2); 2662 result = L2CAP_CR_PEND; 2663 status = L2CAP_CS_AUTHOR_PEND; 2664 parent->sk_data_ready(parent, 0); 2665 } else { 2666 l2cap_state_change(chan, BT_CONFIG); 2667 result = L2CAP_CR_SUCCESS; 2668 status = L2CAP_CS_NO_INFO; 2669 } 2670 } else { 2671 l2cap_state_change(chan, BT_CONNECT2); 2672 result = L2CAP_CR_PEND; 2673 status = L2CAP_CS_AUTHEN_PEND; 2674 } 2675 } else { 2676 l2cap_state_change(chan, BT_CONNECT2); 2677 result = L2CAP_CR_PEND; 2678 status = L2CAP_CS_NO_INFO; 2679 } 2680 2681 response: 2682 release_sock(parent); 2683 2684 sendresp: 2685 rsp.scid = cpu_to_le16(scid); 2686 rsp.dcid = cpu_to_le16(dcid); 2687 rsp.result = cpu_to_le16(result); 2688 rsp.status = cpu_to_le16(status); 2689 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); 2690 2691 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) { 2692 struct l2cap_info_req info; 2693 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); 2694 2695 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; 2696 conn->info_ident = l2cap_get_ident(conn); 2697 2698 schedule_delayed_work(&conn->info_timer, 2699 msecs_to_jiffies(L2CAP_INFO_TIMEOUT)); 2700 2701 l2cap_send_cmd(conn, conn->info_ident, 2702 L2CAP_INFO_REQ, sizeof(info), &info); 2703 } 2704 2705 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) && 2706 result == L2CAP_CR_SUCCESS) { 2707 u8 buf[128]; 2708 set_bit(CONF_REQ_SENT, &chan->conf_state); 2709 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 2710 l2cap_build_conf_req(chan, buf), buf); 2711 chan->num_conf_req++; 2712 } 2713 2714 return 0; 2715 } 2716 2717 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) 2718 { 2719 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data; 2720 u16 scid, dcid, result, status; 2721 struct l2cap_chan *chan; 2722 struct sock *sk; 2723 u8 req[128]; 2724 2725 scid = __le16_to_cpu(rsp->scid); 2726 dcid = __le16_to_cpu(rsp->dcid); 2727 result = __le16_to_cpu(rsp->result); 2728 status = __le16_to_cpu(rsp->status); 2729 2730 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status); 2731 2732 if (scid) { 2733 chan = l2cap_get_chan_by_scid(conn, scid); 2734 if (!chan) 2735 return -EFAULT; 2736 } else { 2737 chan = l2cap_get_chan_by_ident(conn, cmd->ident); 2738 if (!chan) 2739 return -EFAULT; 2740 } 2741 2742 sk = chan->sk; 2743 2744 switch (result) { 2745 case L2CAP_CR_SUCCESS: 2746 l2cap_state_change(chan, BT_CONFIG); 2747 chan->ident = 0; 2748 chan->dcid = dcid; 2749 clear_bit(CONF_CONNECT_PEND, &chan->conf_state); 2750 2751 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) 2752 break; 2753 2754 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 2755 l2cap_build_conf_req(chan, req), req); 2756 chan->num_conf_req++; 2757 break; 2758 2759 case L2CAP_CR_PEND: 2760 set_bit(CONF_CONNECT_PEND, &chan->conf_state); 2761 break; 2762 2763 default: 2764 l2cap_chan_del(chan, ECONNREFUSED); 2765 break; 2766 } 2767 2768 release_sock(sk); 2769 return 0; 2770 } 2771 2772 static inline void set_default_fcs(struct l2cap_chan *chan) 2773 { 2774 /* FCS is enabled only in ERTM or streaming mode, if one or both 2775 * sides request it. 2776 */ 2777 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING) 2778 chan->fcs = L2CAP_FCS_NONE; 2779 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) 2780 chan->fcs = L2CAP_FCS_CRC16; 2781 } 2782 2783 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) 2784 { 2785 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data; 2786 u16 dcid, flags; 2787 u8 rsp[64]; 2788 struct l2cap_chan *chan; 2789 struct sock *sk; 2790 int len; 2791 2792 dcid = __le16_to_cpu(req->dcid); 2793 flags = __le16_to_cpu(req->flags); 2794 2795 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags); 2796 2797 chan = l2cap_get_chan_by_scid(conn, dcid); 2798 if (!chan) 2799 return -ENOENT; 2800 2801 sk = chan->sk; 2802 2803 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) { 2804 struct l2cap_cmd_rej_cid rej; 2805 2806 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID); 2807 rej.scid = cpu_to_le16(chan->scid); 2808 rej.dcid = cpu_to_le16(chan->dcid); 2809 2810 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ, 2811 sizeof(rej), &rej); 2812 goto unlock; 2813 } 2814 2815 /* Reject if config buffer is too small. */ 2816 len = cmd_len - sizeof(*req); 2817 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) { 2818 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, 2819 l2cap_build_conf_rsp(chan, rsp, 2820 L2CAP_CONF_REJECT, flags), rsp); 2821 goto unlock; 2822 } 2823 2824 /* Store config. */ 2825 memcpy(chan->conf_req + chan->conf_len, req->data, len); 2826 chan->conf_len += len; 2827 2828 if (flags & 0x0001) { 2829 /* Incomplete config. Send empty response. */ 2830 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, 2831 l2cap_build_conf_rsp(chan, rsp, 2832 L2CAP_CONF_SUCCESS, 0x0001), rsp); 2833 goto unlock; 2834 } 2835 2836 /* Complete config. */ 2837 len = l2cap_parse_conf_req(chan, rsp); 2838 if (len < 0) { 2839 l2cap_send_disconn_req(conn, chan, ECONNRESET); 2840 goto unlock; 2841 } 2842 2843 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp); 2844 chan->num_conf_rsp++; 2845 2846 /* Reset config buffer. */ 2847 chan->conf_len = 0; 2848 2849 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) 2850 goto unlock; 2851 2852 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) { 2853 set_default_fcs(chan); 2854 2855 l2cap_state_change(chan, BT_CONNECTED); 2856 2857 chan->next_tx_seq = 0; 2858 chan->expected_tx_seq = 0; 2859 skb_queue_head_init(&chan->tx_q); 2860 if (chan->mode == L2CAP_MODE_ERTM) 2861 l2cap_ertm_init(chan); 2862 2863 l2cap_chan_ready(sk); 2864 goto unlock; 2865 } 2866 2867 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) { 2868 u8 buf[64]; 2869 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 2870 l2cap_build_conf_req(chan, buf), buf); 2871 chan->num_conf_req++; 2872 } 2873 2874 /* Got Conf Rsp PENDING from remote side and asume we sent 2875 Conf Rsp PENDING in the code above */ 2876 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) && 2877 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) { 2878 2879 /* check compatibility */ 2880 2881 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state); 2882 set_bit(CONF_OUTPUT_DONE, &chan->conf_state); 2883 2884 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, 2885 l2cap_build_conf_rsp(chan, rsp, 2886 L2CAP_CONF_SUCCESS, 0x0000), rsp); 2887 } 2888 2889 unlock: 2890 release_sock(sk); 2891 return 0; 2892 } 2893 2894 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) 2895 { 2896 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data; 2897 u16 scid, flags, result; 2898 struct l2cap_chan *chan; 2899 struct sock *sk; 2900 int len = cmd->len - sizeof(*rsp); 2901 2902 scid = __le16_to_cpu(rsp->scid); 2903 flags = __le16_to_cpu(rsp->flags); 2904 result = __le16_to_cpu(rsp->result); 2905 2906 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", 2907 scid, flags, result); 2908 2909 chan = l2cap_get_chan_by_scid(conn, scid); 2910 if (!chan) 2911 return 0; 2912 2913 sk = chan->sk; 2914 2915 switch (result) { 2916 case L2CAP_CONF_SUCCESS: 2917 l2cap_conf_rfc_get(chan, rsp->data, len); 2918 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state); 2919 break; 2920 2921 case L2CAP_CONF_PENDING: 2922 set_bit(CONF_REM_CONF_PEND, &chan->conf_state); 2923 2924 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) { 2925 char buf[64]; 2926 2927 len = l2cap_parse_conf_rsp(chan, rsp->data, len, 2928 buf, &result); 2929 if (len < 0) { 2930 l2cap_send_disconn_req(conn, chan, ECONNRESET); 2931 goto done; 2932 } 2933 2934 /* check compatibility */ 2935 2936 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state); 2937 set_bit(CONF_OUTPUT_DONE, &chan->conf_state); 2938 2939 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, 2940 l2cap_build_conf_rsp(chan, buf, 2941 L2CAP_CONF_SUCCESS, 0x0000), buf); 2942 } 2943 goto done; 2944 2945 case L2CAP_CONF_UNACCEPT: 2946 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) { 2947 char req[64]; 2948 2949 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) { 2950 l2cap_send_disconn_req(conn, chan, ECONNRESET); 2951 goto done; 2952 } 2953 2954 /* throw out any old stored conf requests */ 2955 result = L2CAP_CONF_SUCCESS; 2956 len = l2cap_parse_conf_rsp(chan, rsp->data, len, 2957 req, &result); 2958 if (len < 0) { 2959 l2cap_send_disconn_req(conn, chan, ECONNRESET); 2960 goto done; 2961 } 2962 2963 l2cap_send_cmd(conn, l2cap_get_ident(conn), 2964 L2CAP_CONF_REQ, len, req); 2965 chan->num_conf_req++; 2966 if (result != L2CAP_CONF_SUCCESS) 2967 goto done; 2968 break; 2969 } 2970 2971 default: 2972 sk->sk_err = ECONNRESET; 2973 __set_chan_timer(chan, 2974 msecs_to_jiffies(L2CAP_DISC_REJ_TIMEOUT)); 2975 l2cap_send_disconn_req(conn, chan, ECONNRESET); 2976 goto done; 2977 } 2978 2979 if (flags & 0x01) 2980 goto done; 2981 2982 set_bit(CONF_INPUT_DONE, &chan->conf_state); 2983 2984 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) { 2985 set_default_fcs(chan); 2986 2987 l2cap_state_change(chan, BT_CONNECTED); 2988 chan->next_tx_seq = 0; 2989 chan->expected_tx_seq = 0; 2990 skb_queue_head_init(&chan->tx_q); 2991 if (chan->mode == L2CAP_MODE_ERTM) 2992 l2cap_ertm_init(chan); 2993 2994 l2cap_chan_ready(sk); 2995 } 2996 2997 done: 2998 release_sock(sk); 2999 return 0; 3000 } 3001 3002 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) 3003 { 3004 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data; 3005 struct l2cap_disconn_rsp rsp; 3006 u16 dcid, scid; 3007 struct l2cap_chan *chan; 3008 struct sock *sk; 3009 3010 scid = __le16_to_cpu(req->scid); 3011 dcid = __le16_to_cpu(req->dcid); 3012 3013 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid); 3014 3015 chan = l2cap_get_chan_by_scid(conn, dcid); 3016 if (!chan) 3017 return 0; 3018 3019 sk = chan->sk; 3020 3021 rsp.dcid = cpu_to_le16(chan->scid); 3022 rsp.scid = cpu_to_le16(chan->dcid); 3023 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp); 3024 3025 sk->sk_shutdown = SHUTDOWN_MASK; 3026 3027 l2cap_chan_del(chan, ECONNRESET); 3028 release_sock(sk); 3029 3030 chan->ops->close(chan->data); 3031 return 0; 3032 } 3033 3034 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) 3035 { 3036 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data; 3037 u16 dcid, scid; 3038 struct l2cap_chan *chan; 3039 struct sock *sk; 3040 3041 scid = __le16_to_cpu(rsp->scid); 3042 dcid = __le16_to_cpu(rsp->dcid); 3043 3044 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid); 3045 3046 chan = l2cap_get_chan_by_scid(conn, scid); 3047 if (!chan) 3048 return 0; 3049 3050 sk = chan->sk; 3051 3052 l2cap_chan_del(chan, 0); 3053 release_sock(sk); 3054 3055 chan->ops->close(chan->data); 3056 return 0; 3057 } 3058 3059 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) 3060 { 3061 struct l2cap_info_req *req = (struct l2cap_info_req *) data; 3062 u16 type; 3063 3064 type = __le16_to_cpu(req->type); 3065 3066 BT_DBG("type 0x%4.4x", type); 3067 3068 if (type == L2CAP_IT_FEAT_MASK) { 3069 u8 buf[8]; 3070 u32 feat_mask = l2cap_feat_mask; 3071 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; 3072 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK); 3073 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); 3074 if (!disable_ertm) 3075 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING 3076 | L2CAP_FEAT_FCS; 3077 if (enable_hs) 3078 feat_mask |= L2CAP_FEAT_EXT_FLOW 3079 | L2CAP_FEAT_EXT_WINDOW; 3080 3081 put_unaligned_le32(feat_mask, rsp->data); 3082 l2cap_send_cmd(conn, cmd->ident, 3083 L2CAP_INFO_RSP, sizeof(buf), buf); 3084 } else if (type == L2CAP_IT_FIXED_CHAN) { 3085 u8 buf[12]; 3086 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; 3087 3088 if (enable_hs) 3089 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP; 3090 else 3091 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP; 3092 3093 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); 3094 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); 3095 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan)); 3096 l2cap_send_cmd(conn, cmd->ident, 3097 L2CAP_INFO_RSP, sizeof(buf), buf); 3098 } else { 3099 struct l2cap_info_rsp rsp; 3100 rsp.type = cpu_to_le16(type); 3101 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP); 3102 l2cap_send_cmd(conn, cmd->ident, 3103 L2CAP_INFO_RSP, sizeof(rsp), &rsp); 3104 } 3105 3106 return 0; 3107 } 3108 3109 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) 3110 { 3111 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data; 3112 u16 type, result; 3113 3114 type = __le16_to_cpu(rsp->type); 3115 result = __le16_to_cpu(rsp->result); 3116 3117 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result); 3118 3119 /* L2CAP Info req/rsp are unbound to channels, add extra checks */ 3120 if (cmd->ident != conn->info_ident || 3121 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) 3122 return 0; 3123 3124 cancel_delayed_work(&conn->info_timer); 3125 3126 if (result != L2CAP_IR_SUCCESS) { 3127 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; 3128 conn->info_ident = 0; 3129 3130 l2cap_conn_start(conn); 3131 3132 return 0; 3133 } 3134 3135 if (type == L2CAP_IT_FEAT_MASK) { 3136 conn->feat_mask = get_unaligned_le32(rsp->data); 3137 3138 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) { 3139 struct l2cap_info_req req; 3140 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); 3141 3142 conn->info_ident = l2cap_get_ident(conn); 3143 3144 l2cap_send_cmd(conn, conn->info_ident, 3145 L2CAP_INFO_REQ, sizeof(req), &req); 3146 } else { 3147 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; 3148 conn->info_ident = 0; 3149 3150 l2cap_conn_start(conn); 3151 } 3152 } else if (type == L2CAP_IT_FIXED_CHAN) { 3153 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; 3154 conn->info_ident = 0; 3155 3156 l2cap_conn_start(conn); 3157 } 3158 3159 return 0; 3160 } 3161 3162 static inline int l2cap_create_channel_req(struct l2cap_conn *conn, 3163 struct l2cap_cmd_hdr *cmd, u16 cmd_len, 3164 void *data) 3165 { 3166 struct l2cap_create_chan_req *req = data; 3167 struct l2cap_create_chan_rsp rsp; 3168 u16 psm, scid; 3169 3170 if (cmd_len != sizeof(*req)) 3171 return -EPROTO; 3172 3173 if (!enable_hs) 3174 return -EINVAL; 3175 3176 psm = le16_to_cpu(req->psm); 3177 scid = le16_to_cpu(req->scid); 3178 3179 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id); 3180 3181 /* Placeholder: Always reject */ 3182 rsp.dcid = 0; 3183 rsp.scid = cpu_to_le16(scid); 3184 rsp.result = L2CAP_CR_NO_MEM; 3185 rsp.status = L2CAP_CS_NO_INFO; 3186 3187 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP, 3188 sizeof(rsp), &rsp); 3189 3190 return 0; 3191 } 3192 3193 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn, 3194 struct l2cap_cmd_hdr *cmd, void *data) 3195 { 3196 BT_DBG("conn %p", conn); 3197 3198 return l2cap_connect_rsp(conn, cmd, data); 3199 } 3200 3201 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident, 3202 u16 icid, u16 result) 3203 { 3204 struct l2cap_move_chan_rsp rsp; 3205 3206 BT_DBG("icid %d, result %d", icid, result); 3207 3208 rsp.icid = cpu_to_le16(icid); 3209 rsp.result = cpu_to_le16(result); 3210 3211 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp); 3212 } 3213 3214 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn, 3215 struct l2cap_chan *chan, u16 icid, u16 result) 3216 { 3217 struct l2cap_move_chan_cfm cfm; 3218 u8 ident; 3219 3220 BT_DBG("icid %d, result %d", icid, result); 3221 3222 ident = l2cap_get_ident(conn); 3223 if (chan) 3224 chan->ident = ident; 3225 3226 cfm.icid = cpu_to_le16(icid); 3227 cfm.result = cpu_to_le16(result); 3228 3229 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm); 3230 } 3231 3232 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident, 3233 u16 icid) 3234 { 3235 struct l2cap_move_chan_cfm_rsp rsp; 3236 3237 BT_DBG("icid %d", icid); 3238 3239 rsp.icid = cpu_to_le16(icid); 3240 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp); 3241 } 3242 3243 static inline int l2cap_move_channel_req(struct l2cap_conn *conn, 3244 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data) 3245 { 3246 struct l2cap_move_chan_req *req = data; 3247 u16 icid = 0; 3248 u16 result = L2CAP_MR_NOT_ALLOWED; 3249 3250 if (cmd_len != sizeof(*req)) 3251 return -EPROTO; 3252 3253 icid = le16_to_cpu(req->icid); 3254 3255 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id); 3256 3257 if (!enable_hs) 3258 return -EINVAL; 3259 3260 /* Placeholder: Always refuse */ 3261 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result); 3262 3263 return 0; 3264 } 3265 3266 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn, 3267 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data) 3268 { 3269 struct l2cap_move_chan_rsp *rsp = data; 3270 u16 icid, result; 3271 3272 if (cmd_len != sizeof(*rsp)) 3273 return -EPROTO; 3274 3275 icid = le16_to_cpu(rsp->icid); 3276 result = le16_to_cpu(rsp->result); 3277 3278 BT_DBG("icid %d, result %d", icid, result); 3279 3280 /* Placeholder: Always unconfirmed */ 3281 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED); 3282 3283 return 0; 3284 } 3285 3286 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn, 3287 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data) 3288 { 3289 struct l2cap_move_chan_cfm *cfm = data; 3290 u16 icid, result; 3291 3292 if (cmd_len != sizeof(*cfm)) 3293 return -EPROTO; 3294 3295 icid = le16_to_cpu(cfm->icid); 3296 result = le16_to_cpu(cfm->result); 3297 3298 BT_DBG("icid %d, result %d", icid, result); 3299 3300 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid); 3301 3302 return 0; 3303 } 3304 3305 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn, 3306 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data) 3307 { 3308 struct l2cap_move_chan_cfm_rsp *rsp = data; 3309 u16 icid; 3310 3311 if (cmd_len != sizeof(*rsp)) 3312 return -EPROTO; 3313 3314 icid = le16_to_cpu(rsp->icid); 3315 3316 BT_DBG("icid %d", icid); 3317 3318 return 0; 3319 } 3320 3321 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency, 3322 u16 to_multiplier) 3323 { 3324 u16 max_latency; 3325 3326 if (min > max || min < 6 || max > 3200) 3327 return -EINVAL; 3328 3329 if (to_multiplier < 10 || to_multiplier > 3200) 3330 return -EINVAL; 3331 3332 if (max >= to_multiplier * 8) 3333 return -EINVAL; 3334 3335 max_latency = (to_multiplier * 8 / max) - 1; 3336 if (latency > 499 || latency > max_latency) 3337 return -EINVAL; 3338 3339 return 0; 3340 } 3341 3342 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn, 3343 struct l2cap_cmd_hdr *cmd, u8 *data) 3344 { 3345 struct hci_conn *hcon = conn->hcon; 3346 struct l2cap_conn_param_update_req *req; 3347 struct l2cap_conn_param_update_rsp rsp; 3348 u16 min, max, latency, to_multiplier, cmd_len; 3349 int err; 3350 3351 if (!(hcon->link_mode & HCI_LM_MASTER)) 3352 return -EINVAL; 3353 3354 cmd_len = __le16_to_cpu(cmd->len); 3355 if (cmd_len != sizeof(struct l2cap_conn_param_update_req)) 3356 return -EPROTO; 3357 3358 req = (struct l2cap_conn_param_update_req *) data; 3359 min = __le16_to_cpu(req->min); 3360 max = __le16_to_cpu(req->max); 3361 latency = __le16_to_cpu(req->latency); 3362 to_multiplier = __le16_to_cpu(req->to_multiplier); 3363 3364 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x", 3365 min, max, latency, to_multiplier); 3366 3367 memset(&rsp, 0, sizeof(rsp)); 3368 3369 err = l2cap_check_conn_param(min, max, latency, to_multiplier); 3370 if (err) 3371 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED); 3372 else 3373 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED); 3374 3375 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP, 3376 sizeof(rsp), &rsp); 3377 3378 if (!err) 3379 hci_le_conn_update(hcon, min, max, latency, to_multiplier); 3380 3381 return 0; 3382 } 3383 3384 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn, 3385 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) 3386 { 3387 int err = 0; 3388 3389 switch (cmd->code) { 3390 case L2CAP_COMMAND_REJ: 3391 l2cap_command_rej(conn, cmd, data); 3392 break; 3393 3394 case L2CAP_CONN_REQ: 3395 err = l2cap_connect_req(conn, cmd, data); 3396 break; 3397 3398 case L2CAP_CONN_RSP: 3399 err = l2cap_connect_rsp(conn, cmd, data); 3400 break; 3401 3402 case L2CAP_CONF_REQ: 3403 err = l2cap_config_req(conn, cmd, cmd_len, data); 3404 break; 3405 3406 case L2CAP_CONF_RSP: 3407 err = l2cap_config_rsp(conn, cmd, data); 3408 break; 3409 3410 case L2CAP_DISCONN_REQ: 3411 err = l2cap_disconnect_req(conn, cmd, data); 3412 break; 3413 3414 case L2CAP_DISCONN_RSP: 3415 err = l2cap_disconnect_rsp(conn, cmd, data); 3416 break; 3417 3418 case L2CAP_ECHO_REQ: 3419 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data); 3420 break; 3421 3422 case L2CAP_ECHO_RSP: 3423 break; 3424 3425 case L2CAP_INFO_REQ: 3426 err = l2cap_information_req(conn, cmd, data); 3427 break; 3428 3429 case L2CAP_INFO_RSP: 3430 err = l2cap_information_rsp(conn, cmd, data); 3431 break; 3432 3433 case L2CAP_CREATE_CHAN_REQ: 3434 err = l2cap_create_channel_req(conn, cmd, cmd_len, data); 3435 break; 3436 3437 case L2CAP_CREATE_CHAN_RSP: 3438 err = l2cap_create_channel_rsp(conn, cmd, data); 3439 break; 3440 3441 case L2CAP_MOVE_CHAN_REQ: 3442 err = l2cap_move_channel_req(conn, cmd, cmd_len, data); 3443 break; 3444 3445 case L2CAP_MOVE_CHAN_RSP: 3446 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data); 3447 break; 3448 3449 case L2CAP_MOVE_CHAN_CFM: 3450 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data); 3451 break; 3452 3453 case L2CAP_MOVE_CHAN_CFM_RSP: 3454 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data); 3455 break; 3456 3457 default: 3458 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code); 3459 err = -EINVAL; 3460 break; 3461 } 3462 3463 return err; 3464 } 3465 3466 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn, 3467 struct l2cap_cmd_hdr *cmd, u8 *data) 3468 { 3469 switch (cmd->code) { 3470 case L2CAP_COMMAND_REJ: 3471 return 0; 3472 3473 case L2CAP_CONN_PARAM_UPDATE_REQ: 3474 return l2cap_conn_param_update_req(conn, cmd, data); 3475 3476 case L2CAP_CONN_PARAM_UPDATE_RSP: 3477 return 0; 3478 3479 default: 3480 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code); 3481 return -EINVAL; 3482 } 3483 } 3484 3485 static inline void l2cap_sig_channel(struct l2cap_conn *conn, 3486 struct sk_buff *skb) 3487 { 3488 u8 *data = skb->data; 3489 int len = skb->len; 3490 struct l2cap_cmd_hdr cmd; 3491 int err; 3492 3493 l2cap_raw_recv(conn, skb); 3494 3495 while (len >= L2CAP_CMD_HDR_SIZE) { 3496 u16 cmd_len; 3497 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE); 3498 data += L2CAP_CMD_HDR_SIZE; 3499 len -= L2CAP_CMD_HDR_SIZE; 3500 3501 cmd_len = le16_to_cpu(cmd.len); 3502 3503 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident); 3504 3505 if (cmd_len > len || !cmd.ident) { 3506 BT_DBG("corrupted command"); 3507 break; 3508 } 3509 3510 if (conn->hcon->type == LE_LINK) 3511 err = l2cap_le_sig_cmd(conn, &cmd, data); 3512 else 3513 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data); 3514 3515 if (err) { 3516 struct l2cap_cmd_rej_unk rej; 3517 3518 BT_ERR("Wrong link type (%d)", err); 3519 3520 /* FIXME: Map err to a valid reason */ 3521 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); 3522 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej); 3523 } 3524 3525 data += cmd_len; 3526 len -= cmd_len; 3527 } 3528 3529 kfree_skb(skb); 3530 } 3531 3532 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb) 3533 { 3534 u16 our_fcs, rcv_fcs; 3535 int hdr_size; 3536 3537 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) 3538 hdr_size = L2CAP_EXT_HDR_SIZE; 3539 else 3540 hdr_size = L2CAP_ENH_HDR_SIZE; 3541 3542 if (chan->fcs == L2CAP_FCS_CRC16) { 3543 skb_trim(skb, skb->len - L2CAP_FCS_SIZE); 3544 rcv_fcs = get_unaligned_le16(skb->data + skb->len); 3545 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size); 3546 3547 if (our_fcs != rcv_fcs) 3548 return -EBADMSG; 3549 } 3550 return 0; 3551 } 3552 3553 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan) 3554 { 3555 u32 control = 0; 3556 3557 chan->frames_sent = 0; 3558 3559 control |= __set_reqseq(chan, chan->buffer_seq); 3560 3561 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 3562 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); 3563 l2cap_send_sframe(chan, control); 3564 set_bit(CONN_RNR_SENT, &chan->conn_state); 3565 } 3566 3567 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) 3568 l2cap_retransmit_frames(chan); 3569 3570 l2cap_ertm_send(chan); 3571 3572 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && 3573 chan->frames_sent == 0) { 3574 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); 3575 l2cap_send_sframe(chan, control); 3576 } 3577 } 3578 3579 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar) 3580 { 3581 struct sk_buff *next_skb; 3582 int tx_seq_offset, next_tx_seq_offset; 3583 3584 bt_cb(skb)->tx_seq = tx_seq; 3585 bt_cb(skb)->sar = sar; 3586 3587 next_skb = skb_peek(&chan->srej_q); 3588 3589 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq); 3590 3591 while (next_skb) { 3592 if (bt_cb(next_skb)->tx_seq == tx_seq) 3593 return -EINVAL; 3594 3595 next_tx_seq_offset = __seq_offset(chan, 3596 bt_cb(next_skb)->tx_seq, chan->buffer_seq); 3597 3598 if (next_tx_seq_offset > tx_seq_offset) { 3599 __skb_queue_before(&chan->srej_q, next_skb, skb); 3600 return 0; 3601 } 3602 3603 if (skb_queue_is_last(&chan->srej_q, next_skb)) 3604 next_skb = NULL; 3605 else 3606 next_skb = skb_queue_next(&chan->srej_q, next_skb); 3607 } 3608 3609 __skb_queue_tail(&chan->srej_q, skb); 3610 3611 return 0; 3612 } 3613 3614 static void append_skb_frag(struct sk_buff *skb, 3615 struct sk_buff *new_frag, struct sk_buff **last_frag) 3616 { 3617 /* skb->len reflects data in skb as well as all fragments 3618 * skb->data_len reflects only data in fragments 3619 */ 3620 if (!skb_has_frag_list(skb)) 3621 skb_shinfo(skb)->frag_list = new_frag; 3622 3623 new_frag->next = NULL; 3624 3625 (*last_frag)->next = new_frag; 3626 *last_frag = new_frag; 3627 3628 skb->len += new_frag->len; 3629 skb->data_len += new_frag->len; 3630 skb->truesize += new_frag->truesize; 3631 } 3632 3633 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control) 3634 { 3635 int err = -EINVAL; 3636 3637 switch (__get_ctrl_sar(chan, control)) { 3638 case L2CAP_SAR_UNSEGMENTED: 3639 if (chan->sdu) 3640 break; 3641 3642 err = chan->ops->recv(chan->data, skb); 3643 break; 3644 3645 case L2CAP_SAR_START: 3646 if (chan->sdu) 3647 break; 3648 3649 chan->sdu_len = get_unaligned_le16(skb->data); 3650 skb_pull(skb, L2CAP_SDULEN_SIZE); 3651 3652 if (chan->sdu_len > chan->imtu) { 3653 err = -EMSGSIZE; 3654 break; 3655 } 3656 3657 if (skb->len >= chan->sdu_len) 3658 break; 3659 3660 chan->sdu = skb; 3661 chan->sdu_last_frag = skb; 3662 3663 skb = NULL; 3664 err = 0; 3665 break; 3666 3667 case L2CAP_SAR_CONTINUE: 3668 if (!chan->sdu) 3669 break; 3670 3671 append_skb_frag(chan->sdu, skb, 3672 &chan->sdu_last_frag); 3673 skb = NULL; 3674 3675 if (chan->sdu->len >= chan->sdu_len) 3676 break; 3677 3678 err = 0; 3679 break; 3680 3681 case L2CAP_SAR_END: 3682 if (!chan->sdu) 3683 break; 3684 3685 append_skb_frag(chan->sdu, skb, 3686 &chan->sdu_last_frag); 3687 skb = NULL; 3688 3689 if (chan->sdu->len != chan->sdu_len) 3690 break; 3691 3692 err = chan->ops->recv(chan->data, chan->sdu); 3693 3694 if (!err) { 3695 /* Reassembly complete */ 3696 chan->sdu = NULL; 3697 chan->sdu_last_frag = NULL; 3698 chan->sdu_len = 0; 3699 } 3700 break; 3701 } 3702 3703 if (err) { 3704 kfree_skb(skb); 3705 kfree_skb(chan->sdu); 3706 chan->sdu = NULL; 3707 chan->sdu_last_frag = NULL; 3708 chan->sdu_len = 0; 3709 } 3710 3711 return err; 3712 } 3713 3714 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan) 3715 { 3716 u32 control; 3717 3718 BT_DBG("chan %p, Enter local busy", chan); 3719 3720 set_bit(CONN_LOCAL_BUSY, &chan->conn_state); 3721 3722 control = __set_reqseq(chan, chan->buffer_seq); 3723 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); 3724 l2cap_send_sframe(chan, control); 3725 3726 set_bit(CONN_RNR_SENT, &chan->conn_state); 3727 3728 __clear_ack_timer(chan); 3729 } 3730 3731 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan) 3732 { 3733 u32 control; 3734 3735 if (!test_bit(CONN_RNR_SENT, &chan->conn_state)) 3736 goto done; 3737 3738 control = __set_reqseq(chan, chan->buffer_seq); 3739 control |= __set_ctrl_poll(chan); 3740 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); 3741 l2cap_send_sframe(chan, control); 3742 chan->retry_count = 1; 3743 3744 __clear_retrans_timer(chan); 3745 __set_monitor_timer(chan); 3746 3747 set_bit(CONN_WAIT_F, &chan->conn_state); 3748 3749 done: 3750 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state); 3751 clear_bit(CONN_RNR_SENT, &chan->conn_state); 3752 3753 BT_DBG("chan %p, Exit local busy", chan); 3754 } 3755 3756 void l2cap_chan_busy(struct l2cap_chan *chan, int busy) 3757 { 3758 if (chan->mode == L2CAP_MODE_ERTM) { 3759 if (busy) 3760 l2cap_ertm_enter_local_busy(chan); 3761 else 3762 l2cap_ertm_exit_local_busy(chan); 3763 } 3764 } 3765 3766 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq) 3767 { 3768 struct sk_buff *skb; 3769 u32 control; 3770 3771 while ((skb = skb_peek(&chan->srej_q)) && 3772 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 3773 int err; 3774 3775 if (bt_cb(skb)->tx_seq != tx_seq) 3776 break; 3777 3778 skb = skb_dequeue(&chan->srej_q); 3779 control = __set_ctrl_sar(chan, bt_cb(skb)->sar); 3780 err = l2cap_reassemble_sdu(chan, skb, control); 3781 3782 if (err < 0) { 3783 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 3784 break; 3785 } 3786 3787 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej); 3788 tx_seq = __next_seq(chan, tx_seq); 3789 } 3790 } 3791 3792 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq) 3793 { 3794 struct srej_list *l, *tmp; 3795 u32 control; 3796 3797 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) { 3798 if (l->tx_seq == tx_seq) { 3799 list_del(&l->list); 3800 kfree(l); 3801 return; 3802 } 3803 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); 3804 control |= __set_reqseq(chan, l->tx_seq); 3805 l2cap_send_sframe(chan, control); 3806 list_del(&l->list); 3807 list_add_tail(&l->list, &chan->srej_l); 3808 } 3809 } 3810 3811 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq) 3812 { 3813 struct srej_list *new; 3814 u32 control; 3815 3816 while (tx_seq != chan->expected_tx_seq) { 3817 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); 3818 control |= __set_reqseq(chan, chan->expected_tx_seq); 3819 l2cap_send_sframe(chan, control); 3820 3821 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC); 3822 if (!new) 3823 return -ENOMEM; 3824 3825 new->tx_seq = chan->expected_tx_seq; 3826 3827 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); 3828 3829 list_add_tail(&new->list, &chan->srej_l); 3830 } 3831 3832 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); 3833 3834 return 0; 3835 } 3836 3837 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb) 3838 { 3839 u16 tx_seq = __get_txseq(chan, rx_control); 3840 u16 req_seq = __get_reqseq(chan, rx_control); 3841 u8 sar = __get_ctrl_sar(chan, rx_control); 3842 int tx_seq_offset, expected_tx_seq_offset; 3843 int num_to_ack = (chan->tx_win/6) + 1; 3844 int err = 0; 3845 3846 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len, 3847 tx_seq, rx_control); 3848 3849 if (__is_ctrl_final(chan, rx_control) && 3850 test_bit(CONN_WAIT_F, &chan->conn_state)) { 3851 __clear_monitor_timer(chan); 3852 if (chan->unacked_frames > 0) 3853 __set_retrans_timer(chan); 3854 clear_bit(CONN_WAIT_F, &chan->conn_state); 3855 } 3856 3857 chan->expected_ack_seq = req_seq; 3858 l2cap_drop_acked_frames(chan); 3859 3860 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq); 3861 3862 /* invalid tx_seq */ 3863 if (tx_seq_offset >= chan->tx_win) { 3864 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 3865 goto drop; 3866 } 3867 3868 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) 3869 goto drop; 3870 3871 if (tx_seq == chan->expected_tx_seq) 3872 goto expected; 3873 3874 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { 3875 struct srej_list *first; 3876 3877 first = list_first_entry(&chan->srej_l, 3878 struct srej_list, list); 3879 if (tx_seq == first->tx_seq) { 3880 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar); 3881 l2cap_check_srej_gap(chan, tx_seq); 3882 3883 list_del(&first->list); 3884 kfree(first); 3885 3886 if (list_empty(&chan->srej_l)) { 3887 chan->buffer_seq = chan->buffer_seq_srej; 3888 clear_bit(CONN_SREJ_SENT, &chan->conn_state); 3889 l2cap_send_ack(chan); 3890 BT_DBG("chan %p, Exit SREJ_SENT", chan); 3891 } 3892 } else { 3893 struct srej_list *l; 3894 3895 /* duplicated tx_seq */ 3896 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0) 3897 goto drop; 3898 3899 list_for_each_entry(l, &chan->srej_l, list) { 3900 if (l->tx_seq == tx_seq) { 3901 l2cap_resend_srejframe(chan, tx_seq); 3902 return 0; 3903 } 3904 } 3905 3906 err = l2cap_send_srejframe(chan, tx_seq); 3907 if (err < 0) { 3908 l2cap_send_disconn_req(chan->conn, chan, -err); 3909 return err; 3910 } 3911 } 3912 } else { 3913 expected_tx_seq_offset = __seq_offset(chan, 3914 chan->expected_tx_seq, chan->buffer_seq); 3915 3916 /* duplicated tx_seq */ 3917 if (tx_seq_offset < expected_tx_seq_offset) 3918 goto drop; 3919 3920 set_bit(CONN_SREJ_SENT, &chan->conn_state); 3921 3922 BT_DBG("chan %p, Enter SREJ", chan); 3923 3924 INIT_LIST_HEAD(&chan->srej_l); 3925 chan->buffer_seq_srej = chan->buffer_seq; 3926 3927 __skb_queue_head_init(&chan->srej_q); 3928 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar); 3929 3930 set_bit(CONN_SEND_PBIT, &chan->conn_state); 3931 3932 err = l2cap_send_srejframe(chan, tx_seq); 3933 if (err < 0) { 3934 l2cap_send_disconn_req(chan->conn, chan, -err); 3935 return err; 3936 } 3937 3938 __clear_ack_timer(chan); 3939 } 3940 return 0; 3941 3942 expected: 3943 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); 3944 3945 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { 3946 bt_cb(skb)->tx_seq = tx_seq; 3947 bt_cb(skb)->sar = sar; 3948 __skb_queue_tail(&chan->srej_q, skb); 3949 return 0; 3950 } 3951 3952 err = l2cap_reassemble_sdu(chan, skb, rx_control); 3953 chan->buffer_seq = __next_seq(chan, chan->buffer_seq); 3954 3955 if (err < 0) { 3956 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 3957 return err; 3958 } 3959 3960 if (__is_ctrl_final(chan, rx_control)) { 3961 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) 3962 l2cap_retransmit_frames(chan); 3963 } 3964 3965 3966 chan->num_acked = (chan->num_acked + 1) % num_to_ack; 3967 if (chan->num_acked == num_to_ack - 1) 3968 l2cap_send_ack(chan); 3969 else 3970 __set_ack_timer(chan); 3971 3972 return 0; 3973 3974 drop: 3975 kfree_skb(skb); 3976 return 0; 3977 } 3978 3979 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control) 3980 { 3981 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, 3982 __get_reqseq(chan, rx_control), rx_control); 3983 3984 chan->expected_ack_seq = __get_reqseq(chan, rx_control); 3985 l2cap_drop_acked_frames(chan); 3986 3987 if (__is_ctrl_poll(chan, rx_control)) { 3988 set_bit(CONN_SEND_FBIT, &chan->conn_state); 3989 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { 3990 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && 3991 (chan->unacked_frames > 0)) 3992 __set_retrans_timer(chan); 3993 3994 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); 3995 l2cap_send_srejtail(chan); 3996 } else { 3997 l2cap_send_i_or_rr_or_rnr(chan); 3998 } 3999 4000 } else if (__is_ctrl_final(chan, rx_control)) { 4001 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); 4002 4003 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) 4004 l2cap_retransmit_frames(chan); 4005 4006 } else { 4007 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && 4008 (chan->unacked_frames > 0)) 4009 __set_retrans_timer(chan); 4010 4011 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); 4012 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) 4013 l2cap_send_ack(chan); 4014 else 4015 l2cap_ertm_send(chan); 4016 } 4017 } 4018 4019 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control) 4020 { 4021 u16 tx_seq = __get_reqseq(chan, rx_control); 4022 4023 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control); 4024 4025 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); 4026 4027 chan->expected_ack_seq = tx_seq; 4028 l2cap_drop_acked_frames(chan); 4029 4030 if (__is_ctrl_final(chan, rx_control)) { 4031 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) 4032 l2cap_retransmit_frames(chan); 4033 } else { 4034 l2cap_retransmit_frames(chan); 4035 4036 if (test_bit(CONN_WAIT_F, &chan->conn_state)) 4037 set_bit(CONN_REJ_ACT, &chan->conn_state); 4038 } 4039 } 4040 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control) 4041 { 4042 u16 tx_seq = __get_reqseq(chan, rx_control); 4043 4044 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control); 4045 4046 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); 4047 4048 if (__is_ctrl_poll(chan, rx_control)) { 4049 chan->expected_ack_seq = tx_seq; 4050 l2cap_drop_acked_frames(chan); 4051 4052 set_bit(CONN_SEND_FBIT, &chan->conn_state); 4053 l2cap_retransmit_one_frame(chan, tx_seq); 4054 4055 l2cap_ertm_send(chan); 4056 4057 if (test_bit(CONN_WAIT_F, &chan->conn_state)) { 4058 chan->srej_save_reqseq = tx_seq; 4059 set_bit(CONN_SREJ_ACT, &chan->conn_state); 4060 } 4061 } else if (__is_ctrl_final(chan, rx_control)) { 4062 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) && 4063 chan->srej_save_reqseq == tx_seq) 4064 clear_bit(CONN_SREJ_ACT, &chan->conn_state); 4065 else 4066 l2cap_retransmit_one_frame(chan, tx_seq); 4067 } else { 4068 l2cap_retransmit_one_frame(chan, tx_seq); 4069 if (test_bit(CONN_WAIT_F, &chan->conn_state)) { 4070 chan->srej_save_reqseq = tx_seq; 4071 set_bit(CONN_SREJ_ACT, &chan->conn_state); 4072 } 4073 } 4074 } 4075 4076 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control) 4077 { 4078 u16 tx_seq = __get_reqseq(chan, rx_control); 4079 4080 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control); 4081 4082 set_bit(CONN_REMOTE_BUSY, &chan->conn_state); 4083 chan->expected_ack_seq = tx_seq; 4084 l2cap_drop_acked_frames(chan); 4085 4086 if (__is_ctrl_poll(chan, rx_control)) 4087 set_bit(CONN_SEND_FBIT, &chan->conn_state); 4088 4089 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) { 4090 __clear_retrans_timer(chan); 4091 if (__is_ctrl_poll(chan, rx_control)) 4092 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL); 4093 return; 4094 } 4095 4096 if (__is_ctrl_poll(chan, rx_control)) { 4097 l2cap_send_srejtail(chan); 4098 } else { 4099 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR); 4100 l2cap_send_sframe(chan, rx_control); 4101 } 4102 } 4103 4104 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb) 4105 { 4106 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len); 4107 4108 if (__is_ctrl_final(chan, rx_control) && 4109 test_bit(CONN_WAIT_F, &chan->conn_state)) { 4110 __clear_monitor_timer(chan); 4111 if (chan->unacked_frames > 0) 4112 __set_retrans_timer(chan); 4113 clear_bit(CONN_WAIT_F, &chan->conn_state); 4114 } 4115 4116 switch (__get_ctrl_super(chan, rx_control)) { 4117 case L2CAP_SUPER_RR: 4118 l2cap_data_channel_rrframe(chan, rx_control); 4119 break; 4120 4121 case L2CAP_SUPER_REJ: 4122 l2cap_data_channel_rejframe(chan, rx_control); 4123 break; 4124 4125 case L2CAP_SUPER_SREJ: 4126 l2cap_data_channel_srejframe(chan, rx_control); 4127 break; 4128 4129 case L2CAP_SUPER_RNR: 4130 l2cap_data_channel_rnrframe(chan, rx_control); 4131 break; 4132 } 4133 4134 kfree_skb(skb); 4135 return 0; 4136 } 4137 4138 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb) 4139 { 4140 struct l2cap_chan *chan = l2cap_pi(sk)->chan; 4141 u32 control; 4142 u16 req_seq; 4143 int len, next_tx_seq_offset, req_seq_offset; 4144 4145 control = __get_control(chan, skb->data); 4146 skb_pull(skb, __ctrl_size(chan)); 4147 len = skb->len; 4148 4149 /* 4150 * We can just drop the corrupted I-frame here. 4151 * Receiver will miss it and start proper recovery 4152 * procedures and ask retransmission. 4153 */ 4154 if (l2cap_check_fcs(chan, skb)) 4155 goto drop; 4156 4157 if (__is_sar_start(chan, control) && !__is_sframe(chan, control)) 4158 len -= L2CAP_SDULEN_SIZE; 4159 4160 if (chan->fcs == L2CAP_FCS_CRC16) 4161 len -= L2CAP_FCS_SIZE; 4162 4163 if (len > chan->mps) { 4164 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 4165 goto drop; 4166 } 4167 4168 req_seq = __get_reqseq(chan, control); 4169 4170 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq); 4171 4172 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq, 4173 chan->expected_ack_seq); 4174 4175 /* check for invalid req-seq */ 4176 if (req_seq_offset > next_tx_seq_offset) { 4177 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 4178 goto drop; 4179 } 4180 4181 if (!__is_sframe(chan, control)) { 4182 if (len < 0) { 4183 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 4184 goto drop; 4185 } 4186 4187 l2cap_data_channel_iframe(chan, control, skb); 4188 } else { 4189 if (len != 0) { 4190 BT_ERR("%d", len); 4191 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 4192 goto drop; 4193 } 4194 4195 l2cap_data_channel_sframe(chan, control, skb); 4196 } 4197 4198 return 0; 4199 4200 drop: 4201 kfree_skb(skb); 4202 return 0; 4203 } 4204 4205 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb) 4206 { 4207 struct l2cap_chan *chan; 4208 struct sock *sk = NULL; 4209 u32 control; 4210 u16 tx_seq; 4211 int len; 4212 4213 chan = l2cap_get_chan_by_scid(conn, cid); 4214 if (!chan) { 4215 BT_DBG("unknown cid 0x%4.4x", cid); 4216 goto drop; 4217 } 4218 4219 sk = chan->sk; 4220 4221 BT_DBG("chan %p, len %d", chan, skb->len); 4222 4223 if (chan->state != BT_CONNECTED) 4224 goto drop; 4225 4226 switch (chan->mode) { 4227 case L2CAP_MODE_BASIC: 4228 /* If socket recv buffers overflows we drop data here 4229 * which is *bad* because L2CAP has to be reliable. 4230 * But we don't have any other choice. L2CAP doesn't 4231 * provide flow control mechanism. */ 4232 4233 if (chan->imtu < skb->len) 4234 goto drop; 4235 4236 if (!chan->ops->recv(chan->data, skb)) 4237 goto done; 4238 break; 4239 4240 case L2CAP_MODE_ERTM: 4241 l2cap_ertm_data_rcv(sk, skb); 4242 4243 goto done; 4244 4245 case L2CAP_MODE_STREAMING: 4246 control = __get_control(chan, skb->data); 4247 skb_pull(skb, __ctrl_size(chan)); 4248 len = skb->len; 4249 4250 if (l2cap_check_fcs(chan, skb)) 4251 goto drop; 4252 4253 if (__is_sar_start(chan, control)) 4254 len -= L2CAP_SDULEN_SIZE; 4255 4256 if (chan->fcs == L2CAP_FCS_CRC16) 4257 len -= L2CAP_FCS_SIZE; 4258 4259 if (len > chan->mps || len < 0 || __is_sframe(chan, control)) 4260 goto drop; 4261 4262 tx_seq = __get_txseq(chan, control); 4263 4264 if (chan->expected_tx_seq != tx_seq) { 4265 /* Frame(s) missing - must discard partial SDU */ 4266 kfree_skb(chan->sdu); 4267 chan->sdu = NULL; 4268 chan->sdu_last_frag = NULL; 4269 chan->sdu_len = 0; 4270 4271 /* TODO: Notify userland of missing data */ 4272 } 4273 4274 chan->expected_tx_seq = __next_seq(chan, tx_seq); 4275 4276 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE) 4277 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 4278 4279 goto done; 4280 4281 default: 4282 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode); 4283 break; 4284 } 4285 4286 drop: 4287 kfree_skb(skb); 4288 4289 done: 4290 if (sk) 4291 release_sock(sk); 4292 4293 return 0; 4294 } 4295 4296 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb) 4297 { 4298 struct sock *sk = NULL; 4299 struct l2cap_chan *chan; 4300 4301 chan = l2cap_global_chan_by_psm(0, psm, conn->src); 4302 if (!chan) 4303 goto drop; 4304 4305 sk = chan->sk; 4306 4307 lock_sock(sk); 4308 4309 BT_DBG("sk %p, len %d", sk, skb->len); 4310 4311 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED) 4312 goto drop; 4313 4314 if (chan->imtu < skb->len) 4315 goto drop; 4316 4317 if (!chan->ops->recv(chan->data, skb)) 4318 goto done; 4319 4320 drop: 4321 kfree_skb(skb); 4322 4323 done: 4324 if (sk) 4325 release_sock(sk); 4326 return 0; 4327 } 4328 4329 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb) 4330 { 4331 struct sock *sk = NULL; 4332 struct l2cap_chan *chan; 4333 4334 chan = l2cap_global_chan_by_scid(0, cid, conn->src); 4335 if (!chan) 4336 goto drop; 4337 4338 sk = chan->sk; 4339 4340 lock_sock(sk); 4341 4342 BT_DBG("sk %p, len %d", sk, skb->len); 4343 4344 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED) 4345 goto drop; 4346 4347 if (chan->imtu < skb->len) 4348 goto drop; 4349 4350 if (!chan->ops->recv(chan->data, skb)) 4351 goto done; 4352 4353 drop: 4354 kfree_skb(skb); 4355 4356 done: 4357 if (sk) 4358 release_sock(sk); 4359 return 0; 4360 } 4361 4362 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb) 4363 { 4364 struct l2cap_hdr *lh = (void *) skb->data; 4365 u16 cid, len; 4366 __le16 psm; 4367 4368 skb_pull(skb, L2CAP_HDR_SIZE); 4369 cid = __le16_to_cpu(lh->cid); 4370 len = __le16_to_cpu(lh->len); 4371 4372 if (len != skb->len) { 4373 kfree_skb(skb); 4374 return; 4375 } 4376 4377 BT_DBG("len %d, cid 0x%4.4x", len, cid); 4378 4379 switch (cid) { 4380 case L2CAP_CID_LE_SIGNALING: 4381 case L2CAP_CID_SIGNALING: 4382 l2cap_sig_channel(conn, skb); 4383 break; 4384 4385 case L2CAP_CID_CONN_LESS: 4386 psm = get_unaligned_le16(skb->data); 4387 skb_pull(skb, 2); 4388 l2cap_conless_channel(conn, psm, skb); 4389 break; 4390 4391 case L2CAP_CID_LE_DATA: 4392 l2cap_att_channel(conn, cid, skb); 4393 break; 4394 4395 case L2CAP_CID_SMP: 4396 if (smp_sig_channel(conn, skb)) 4397 l2cap_conn_del(conn->hcon, EACCES); 4398 break; 4399 4400 default: 4401 l2cap_data_channel(conn, cid, skb); 4402 break; 4403 } 4404 } 4405 4406 /* ---- L2CAP interface with lower layer (HCI) ---- */ 4407 4408 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) 4409 { 4410 int exact = 0, lm1 = 0, lm2 = 0; 4411 struct l2cap_chan *c; 4412 4413 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr)); 4414 4415 /* Find listening sockets and check their link_mode */ 4416 read_lock(&chan_list_lock); 4417 list_for_each_entry(c, &chan_list, global_l) { 4418 struct sock *sk = c->sk; 4419 4420 if (c->state != BT_LISTEN) 4421 continue; 4422 4423 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) { 4424 lm1 |= HCI_LM_ACCEPT; 4425 if (test_bit(FLAG_ROLE_SWITCH, &c->flags)) 4426 lm1 |= HCI_LM_MASTER; 4427 exact++; 4428 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) { 4429 lm2 |= HCI_LM_ACCEPT; 4430 if (test_bit(FLAG_ROLE_SWITCH, &c->flags)) 4431 lm2 |= HCI_LM_MASTER; 4432 } 4433 } 4434 read_unlock(&chan_list_lock); 4435 4436 return exact ? lm1 : lm2; 4437 } 4438 4439 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status) 4440 { 4441 struct l2cap_conn *conn; 4442 4443 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); 4444 4445 if (!status) { 4446 conn = l2cap_conn_add(hcon, status); 4447 if (conn) 4448 l2cap_conn_ready(conn); 4449 } else 4450 l2cap_conn_del(hcon, bt_to_errno(status)); 4451 4452 return 0; 4453 } 4454 4455 int l2cap_disconn_ind(struct hci_conn *hcon) 4456 { 4457 struct l2cap_conn *conn = hcon->l2cap_data; 4458 4459 BT_DBG("hcon %p", hcon); 4460 4461 if (!conn) 4462 return HCI_ERROR_REMOTE_USER_TERM; 4463 return conn->disc_reason; 4464 } 4465 4466 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason) 4467 { 4468 BT_DBG("hcon %p reason %d", hcon, reason); 4469 4470 l2cap_conn_del(hcon, bt_to_errno(reason)); 4471 return 0; 4472 } 4473 4474 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt) 4475 { 4476 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) 4477 return; 4478 4479 if (encrypt == 0x00) { 4480 if (chan->sec_level == BT_SECURITY_MEDIUM) { 4481 __clear_chan_timer(chan); 4482 __set_chan_timer(chan, 4483 msecs_to_jiffies(L2CAP_ENC_TIMEOUT)); 4484 } else if (chan->sec_level == BT_SECURITY_HIGH) 4485 l2cap_chan_close(chan, ECONNREFUSED); 4486 } else { 4487 if (chan->sec_level == BT_SECURITY_MEDIUM) 4488 __clear_chan_timer(chan); 4489 } 4490 } 4491 4492 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) 4493 { 4494 struct l2cap_conn *conn = hcon->l2cap_data; 4495 struct l2cap_chan *chan; 4496 4497 if (!conn) 4498 return 0; 4499 4500 BT_DBG("conn %p", conn); 4501 4502 if (hcon->type == LE_LINK) { 4503 smp_distribute_keys(conn, 0); 4504 cancel_delayed_work(&conn->security_timer); 4505 } 4506 4507 rcu_read_lock(); 4508 4509 list_for_each_entry_rcu(chan, &conn->chan_l, list) { 4510 struct sock *sk = chan->sk; 4511 4512 bh_lock_sock(sk); 4513 4514 BT_DBG("chan->scid %d", chan->scid); 4515 4516 if (chan->scid == L2CAP_CID_LE_DATA) { 4517 if (!status && encrypt) { 4518 chan->sec_level = hcon->sec_level; 4519 l2cap_chan_ready(sk); 4520 } 4521 4522 bh_unlock_sock(sk); 4523 continue; 4524 } 4525 4526 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) { 4527 bh_unlock_sock(sk); 4528 continue; 4529 } 4530 4531 if (!status && (chan->state == BT_CONNECTED || 4532 chan->state == BT_CONFIG)) { 4533 l2cap_check_encryption(chan, encrypt); 4534 bh_unlock_sock(sk); 4535 continue; 4536 } 4537 4538 if (chan->state == BT_CONNECT) { 4539 if (!status) { 4540 struct l2cap_conn_req req; 4541 req.scid = cpu_to_le16(chan->scid); 4542 req.psm = chan->psm; 4543 4544 chan->ident = l2cap_get_ident(conn); 4545 set_bit(CONF_CONNECT_PEND, &chan->conf_state); 4546 4547 l2cap_send_cmd(conn, chan->ident, 4548 L2CAP_CONN_REQ, sizeof(req), &req); 4549 } else { 4550 __clear_chan_timer(chan); 4551 __set_chan_timer(chan, 4552 msecs_to_jiffies(L2CAP_DISC_TIMEOUT)); 4553 } 4554 } else if (chan->state == BT_CONNECT2) { 4555 struct l2cap_conn_rsp rsp; 4556 __u16 res, stat; 4557 4558 if (!status) { 4559 if (bt_sk(sk)->defer_setup) { 4560 struct sock *parent = bt_sk(sk)->parent; 4561 res = L2CAP_CR_PEND; 4562 stat = L2CAP_CS_AUTHOR_PEND; 4563 if (parent) 4564 parent->sk_data_ready(parent, 0); 4565 } else { 4566 l2cap_state_change(chan, BT_CONFIG); 4567 res = L2CAP_CR_SUCCESS; 4568 stat = L2CAP_CS_NO_INFO; 4569 } 4570 } else { 4571 l2cap_state_change(chan, BT_DISCONN); 4572 __set_chan_timer(chan, 4573 msecs_to_jiffies(L2CAP_DISC_TIMEOUT)); 4574 res = L2CAP_CR_SEC_BLOCK; 4575 stat = L2CAP_CS_NO_INFO; 4576 } 4577 4578 rsp.scid = cpu_to_le16(chan->dcid); 4579 rsp.dcid = cpu_to_le16(chan->scid); 4580 rsp.result = cpu_to_le16(res); 4581 rsp.status = cpu_to_le16(stat); 4582 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, 4583 sizeof(rsp), &rsp); 4584 } 4585 4586 bh_unlock_sock(sk); 4587 } 4588 4589 rcu_read_unlock(); 4590 4591 return 0; 4592 } 4593 4594 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) 4595 { 4596 struct l2cap_conn *conn = hcon->l2cap_data; 4597 4598 if (!conn) 4599 conn = l2cap_conn_add(hcon, 0); 4600 4601 if (!conn) 4602 goto drop; 4603 4604 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags); 4605 4606 if (!(flags & ACL_CONT)) { 4607 struct l2cap_hdr *hdr; 4608 struct l2cap_chan *chan; 4609 u16 cid; 4610 int len; 4611 4612 if (conn->rx_len) { 4613 BT_ERR("Unexpected start frame (len %d)", skb->len); 4614 kfree_skb(conn->rx_skb); 4615 conn->rx_skb = NULL; 4616 conn->rx_len = 0; 4617 l2cap_conn_unreliable(conn, ECOMM); 4618 } 4619 4620 /* Start fragment always begin with Basic L2CAP header */ 4621 if (skb->len < L2CAP_HDR_SIZE) { 4622 BT_ERR("Frame is too short (len %d)", skb->len); 4623 l2cap_conn_unreliable(conn, ECOMM); 4624 goto drop; 4625 } 4626 4627 hdr = (struct l2cap_hdr *) skb->data; 4628 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE; 4629 cid = __le16_to_cpu(hdr->cid); 4630 4631 if (len == skb->len) { 4632 /* Complete frame received */ 4633 l2cap_recv_frame(conn, skb); 4634 return 0; 4635 } 4636 4637 BT_DBG("Start: total len %d, frag len %d", len, skb->len); 4638 4639 if (skb->len > len) { 4640 BT_ERR("Frame is too long (len %d, expected len %d)", 4641 skb->len, len); 4642 l2cap_conn_unreliable(conn, ECOMM); 4643 goto drop; 4644 } 4645 4646 chan = l2cap_get_chan_by_scid(conn, cid); 4647 4648 if (chan && chan->sk) { 4649 struct sock *sk = chan->sk; 4650 4651 if (chan->imtu < len - L2CAP_HDR_SIZE) { 4652 BT_ERR("Frame exceeding recv MTU (len %d, " 4653 "MTU %d)", len, 4654 chan->imtu); 4655 release_sock(sk); 4656 l2cap_conn_unreliable(conn, ECOMM); 4657 goto drop; 4658 } 4659 release_sock(sk); 4660 } 4661 4662 /* Allocate skb for the complete frame (with header) */ 4663 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC); 4664 if (!conn->rx_skb) 4665 goto drop; 4666 4667 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), 4668 skb->len); 4669 conn->rx_len = len - skb->len; 4670 } else { 4671 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len); 4672 4673 if (!conn->rx_len) { 4674 BT_ERR("Unexpected continuation frame (len %d)", skb->len); 4675 l2cap_conn_unreliable(conn, ECOMM); 4676 goto drop; 4677 } 4678 4679 if (skb->len > conn->rx_len) { 4680 BT_ERR("Fragment is too long (len %d, expected %d)", 4681 skb->len, conn->rx_len); 4682 kfree_skb(conn->rx_skb); 4683 conn->rx_skb = NULL; 4684 conn->rx_len = 0; 4685 l2cap_conn_unreliable(conn, ECOMM); 4686 goto drop; 4687 } 4688 4689 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), 4690 skb->len); 4691 conn->rx_len -= skb->len; 4692 4693 if (!conn->rx_len) { 4694 /* Complete frame received */ 4695 l2cap_recv_frame(conn, conn->rx_skb); 4696 conn->rx_skb = NULL; 4697 } 4698 } 4699 4700 drop: 4701 kfree_skb(skb); 4702 return 0; 4703 } 4704 4705 static int l2cap_debugfs_show(struct seq_file *f, void *p) 4706 { 4707 struct l2cap_chan *c; 4708 4709 read_lock(&chan_list_lock); 4710 4711 list_for_each_entry(c, &chan_list, global_l) { 4712 struct sock *sk = c->sk; 4713 4714 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n", 4715 batostr(&bt_sk(sk)->src), 4716 batostr(&bt_sk(sk)->dst), 4717 c->state, __le16_to_cpu(c->psm), 4718 c->scid, c->dcid, c->imtu, c->omtu, 4719 c->sec_level, c->mode); 4720 } 4721 4722 read_unlock(&chan_list_lock); 4723 4724 return 0; 4725 } 4726 4727 static int l2cap_debugfs_open(struct inode *inode, struct file *file) 4728 { 4729 return single_open(file, l2cap_debugfs_show, inode->i_private); 4730 } 4731 4732 static const struct file_operations l2cap_debugfs_fops = { 4733 .open = l2cap_debugfs_open, 4734 .read = seq_read, 4735 .llseek = seq_lseek, 4736 .release = single_release, 4737 }; 4738 4739 static struct dentry *l2cap_debugfs; 4740 4741 int __init l2cap_init(void) 4742 { 4743 int err; 4744 4745 err = l2cap_init_sockets(); 4746 if (err < 0) 4747 return err; 4748 4749 if (bt_debugfs) { 4750 l2cap_debugfs = debugfs_create_file("l2cap", 0444, 4751 bt_debugfs, NULL, &l2cap_debugfs_fops); 4752 if (!l2cap_debugfs) 4753 BT_ERR("Failed to create L2CAP debug file"); 4754 } 4755 4756 return 0; 4757 } 4758 4759 void l2cap_exit(void) 4760 { 4761 debugfs_remove(l2cap_debugfs); 4762 l2cap_cleanup_sockets(); 4763 } 4764 4765 module_param(disable_ertm, bool, 0644); 4766 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode"); 4767