1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40
41 #include "smp.h"
42
43 #define LE_FLOWCTL_MAX_CREDITS 65535
44
45 bool disable_ertm;
46 bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED);
47
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49
50 static LIST_HEAD(chan_list);
51 static DEFINE_RWLOCK(chan_list_lock);
52
53 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
54 u8 code, u8 ident, u16 dlen, void *data);
55 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 void *data);
57 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
58 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59
60 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
61 struct sk_buff_head *skbs, u8 event);
62 static void l2cap_retrans_timeout(struct work_struct *work);
63 static void l2cap_monitor_timeout(struct work_struct *work);
64 static void l2cap_ack_timeout(struct work_struct *work);
65
/* Translate an HCI link type plus HCI-level address type into the
 * exported BDADDR_* address type used by the L2CAP/socket layer.
 */
static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
{
	if (link_type != LE_LINK)
		return BDADDR_BREDR;

	return bdaddr_type == ADDR_LE_DEV_PUBLIC ? BDADDR_LE_PUBLIC :
						   BDADDR_LE_RANDOM;
}
77
/* BDADDR_* type of the local (source) address of @hcon. */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}
82
/* BDADDR_* type of the remote (destination) address of @hcon. */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}
87
88 /* ---- L2CAP channels ---- */
89
/* Look up a channel on @conn by its destination CID.
 * Caller must hold conn->lock; no reference is taken on the result.
 */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->dcid == cid)
			return c;
	}
	return NULL;
}
101
/* Look up a channel on @conn by its source CID.
 * Caller must hold conn->lock; no reference is taken on the result.
 */
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->scid == cid)
			return c;
	}
	return NULL;
}
113
/* Find channel with given SCID.
 * Returns a reference locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0, i.e. the channel is
		 * not already on its way to destruction.  The hold must be
		 * taken before the lock so the channel cannot be freed
		 * while we wait for it.
		 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}

	return c;
}
132
/* Find channel with given DCID.
 * Returns a reference locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0, i.e. the channel is
		 * not already being destroyed; hold before lock so the
		 * channel stays valid for the lock acquisition.
		 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}

	return c;
}
151
/* Look up a channel on @conn by the signalling command identifier it
 * is currently using.  Caller must hold conn->lock; no reference taken.
 */
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->ident == ident)
			return c;
	}
	return NULL;
}
163
/* Search the global channel list for a channel bound to (@psm, @src).
 * BR/EDR and LE channels live in separate PSM namespaces, so entries of
 * the other transport are skipped.  Caller must hold chan_list_lock.
 */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
						      u8 src_type)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		/* Skip LE channels when searching the BR/EDR namespace */
		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
			continue;

		/* Skip BR/EDR channels when searching the LE namespace */
		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
			continue;

		if (c->sport == psm && !bacmp(&c->src, src))
			return c;
	}
	return NULL;
}
181
/* Bind @chan to @psm on the local address @src.  A zero @psm requests
 * automatic allocation of a dynamic PSM.  Returns 0 on success,
 * -EADDRINUSE if the requested PSM is taken, or -EINVAL if no dynamic
 * PSM is available.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			/* BR/EDR dynamic PSMs must be odd (LSB of the least
			 * significant octet set), hence the increment of 2.
			 */
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
226
/* Bind @chan directly to a fixed CID instead of a PSM.  Always
 * succeeds; returns 0.
 */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
241
l2cap_alloc_cid(struct l2cap_conn * conn)242 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
243 {
244 u16 cid, dyn_end;
245
246 if (conn->hcon->type == LE_LINK)
247 dyn_end = L2CAP_CID_LE_DYN_END;
248 else
249 dyn_end = L2CAP_CID_DYN_END;
250
251 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
252 if (!__l2cap_get_chan_by_scid(conn, cid))
253 return cid;
254 }
255
256 return 0;
257 }
258
/* Move @chan to @state and notify the channel owner (no error code). */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
267
/* Move @chan to @state and notify the channel owner with error @err. */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
274
/* Report error @err to the channel owner without changing the state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
279
/* Arm the ERTM retransmission timer, unless the monitor timer is
 * already pending (monitor supersedes retransmission) or no
 * retransmission timeout was negotiated.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				secs_to_jiffies(chan->retrans_timeout));
	}
}
288
/* Arm the ERTM monitor timer (cancelling any pending retransmission
 * timer first), if a monitor timeout was negotiated.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				secs_to_jiffies(chan->monitor_timeout));
	}
}
297
/* Find the skb in @head whose ERTM TxSeq equals @seq, or NULL. */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->l2cap.txseq == seq)
			return skb;
	}

	return NULL;
}
310
311 /* ---- L2CAP sequence number lists ---- */
312
313 /* For ERTM, ordered lists of sequence numbers must be tracked for
314 * SREJ requests that are received and for frames that are to be
315 * retransmitted. These seq_list functions implement a singly-linked
316 * list in an array, where membership in the list can also be checked
317 * in constant time. Items can also be added to the tail of the list
318 * and removed from the head in constant time, without further memory
319 * allocs or frees.
320 */
321
/* Allocate and reset a sequence-number list able to hold @size entries.
 * Returns 0 on success or -ENOMEM.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t i, alloc_size;

	/* Round the backing array up to a power of 2 so that sequence
	 * numbers (up to 14 bits) can be hashed into it with a simple
	 * bitwise AND against the stored mask.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;

	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
344
/* Release the backing array of a sequence-number list. */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
349
/* O(1) membership test: a slot holds L2CAP_SEQ_LIST_CLEAR only when the
 * corresponding sequence number is not on the list.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
356
/* Remove and return the sequence number at the head of the list.
 * Callers must not pop an empty list.  When the last element is
 * removed, head and tail are reset to the CLEAR sentinel.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next linked entry and clear the old slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		/* List is now empty */
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
372
l2cap_seq_list_clear(struct l2cap_seq_list * seq_list)373 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
374 {
375 u16 i;
376
377 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
378 return;
379
380 for (i = 0; i <= seq_list->mask; i++)
381 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
382
383 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
384 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
385 }
386
/* Append @seq to the tail of the list in O(1).  Duplicates (slots that
 * are already occupied) are silently ignored.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		/* First element: becomes the new head */
		seq_list->head = seq;
	else
		/* Link the previous tail to the new entry */
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
404
/* Workqueue handler for the per-channel timer: close the channel with
 * an error code derived from the state it timed out in.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	if (!conn)
		return;

	mutex_lock(&conn->lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	/* Drop the reference taken when the timer was scheduled */
	l2cap_chan_put(chan);

	mutex_unlock(&conn->lock);
}
440
/* Allocate and initialise a new L2CAP channel with a single reference
 * held, register it on the global channel list, and return it in
 * BT_OPEN state.  Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	/* Available receive buffer space is initially unknown */
	chan->rx_avail = -1;

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
480
/* kref release callback: unlink the channel from the global list and
 * free it.  Runs when the last reference is dropped via
 * l2cap_chan_put().
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
493
/* Take an additional reference on @c; the caller must already hold one. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
500
/* Take a reference on @c only if its refcount has not already dropped
 * to zero.  Returns @c on success or NULL if the channel is being
 * destroyed.
 */
struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	if (!kref_get_unless_zero(&c->kref))
		return NULL;

	return c;
}
510
/* Drop a reference on @c; frees the channel when the count hits zero. */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
518
/* Reset @chan to the spec-default ERTM/flow-control parameters and
 * clear all configuration state.  Called before (re)configuration.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

	chan->conf_state = 0;
	/* Cleared in l2cap_chan_ready() once configuration completes */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
539
/* Compute how many LE flow-control credits to grant the peer, based on
 * the advertised receive-buffer space (chan->rx_avail) and the bytes of
 * a partially reassembled SDU already buffered.
 */
static __u16 l2cap_le_rx_credits(struct l2cap_chan *chan)
{
	size_t sdu_len = chan->sdu ? chan->sdu->len : 0;

	/* MPS not negotiated yet: cannot grant any credits */
	if (chan->mps == 0)
		return 0;

	/* If we don't know the available space in the receiver buffer, give
	 * enough credits for a full packet.
	 */
	if (chan->rx_avail == -1)
		return (chan->imtu / chan->mps) + 1;

	/* If we know how much space is available in the receive buffer, give
	 * out as many credits as would fill the buffer.
	 */
	if (chan->rx_avail <= sdu_len)
		return 0;

	return DIV_ROUND_UP(chan->rx_avail - sdu_len, chan->mps);
}
561
/* Initialise LE credit-based flow-control state for @chan with the
 * peer-granted @tx_credits, and compute our initial rx credit grant.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	chan->rx_credits = l2cap_le_rx_credits(chan);

	skb_queue_head_init(&chan->tx_q);
}
574
/* Initialise enhanced-credit (ECRED) flow control: same as LE flow
 * control but with the spec-mandated minimum MPS enforced.
 */
static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
{
	l2cap_le_flowctl_init(chan, tx_credits);

	/* L2CAP implementations shall support a minimum MPS of 64 octets */
	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
		chan->mps = L2CAP_ECRED_MIN_MPS;
		/* Recompute credits since they depend on MPS */
		chan->rx_credits = l2cap_le_rx_credits(chan);
	}
}
585
/* Attach @chan to @conn: allocate/assign CIDs according to the channel
 * type, set QoS defaults, take a channel reference and (usually) an
 * hci_conn reference, then append to the connection's channel list.
 * Caller must hold conn->lock.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	/* Append to the list since the order matters for ECRED */
	list_add_tail(&chan->list, &conn->chan_l);
}
638
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->lock);
}
645
/* Detach @chan from its connection (if any) and tear down mode-specific
 * state: drop references taken in __l2cap_chan_add(), purge pending
 * queues and cancel ERTM timers.  @err is propagated to the owner via
 * the teardown callback.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference taken in __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);
	}

	/* Mode-specific state below only exists once configuration has
	 * completed.
	 */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
703
/* Invoke @func on every channel of @conn whose signalling ident matches
 * @id.  Uses the _safe iterator so @func may delete the channel from
 * the list.  Caller must hold conn->lock.
 */
static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
				 l2cap_chan_func_t func, void *data)
{
	struct l2cap_chan *chan, *l;

	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		if (chan->ident == id)
			func(chan, data);
	}
}
714
/* Invoke @func on every channel of @conn.  Caller must hold conn->lock;
 * @func must not remove entries from the list.
 */
static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
			      void *data)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		func(chan, data);
	}
}
724
/* Invoke @func with @data on every channel of @conn, holding the
 * connection lock for the duration.  A NULL @conn is a no-op.
 */
void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
		     void *data)
{
	if (!conn)
		return;

	mutex_lock(&conn->lock);
	__l2cap_chan_list(conn, func, data);
	mutex_unlock(&conn->lock);
}
EXPORT_SYMBOL_GPL(l2cap_chan_list);
737
/* Workqueue handler: after an LE identity-address resolution, copy the
 * updated destination address of the underlying hci_conn into every
 * channel on the connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_timer.work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
756
/* Send an LE credit-based connection response rejecting @chan: with
 * authorization-pending result if setup was deferred, bad-PSM
 * otherwise.  Moves the channel to BT_DISCONN.
 */
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_LE_AUTHORIZATION;
	else
		result = L2CAP_CR_LE_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid    = cpu_to_le16(chan->scid);
	rsp.mtu     = cpu_to_le16(chan->imtu);
	rsp.mps     = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}
779
/* Reject a pending ECRED connection: move to BT_DISCONN and emit the
 * deferred enhanced-credit connection response.
 */
static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
{
	l2cap_state_change(chan, BT_DISCONN);

	__l2cap_ecred_conn_rsp_defer(chan);
}
786
/* Send a BR/EDR connection response rejecting @chan: security-block if
 * setup was deferred, bad-PSM otherwise.  Moves the channel to
 * BT_DISCONN.
 */
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}
807
/* Close @chan according to its current state: send a disconnect request
 * for established conn-oriented channels, a reject for half-open
 * incoming ones, or tear down immediately otherwise.  @reason is
 * reported to the channel owner.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Graceful disconnect: wait for the response
			 * under a fresh channel timer.
			 */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					/* ECRED responses are deferred and
					 * cover multiple channels, so the
					 * channel is not deleted here.
					 */
					l2cap_chan_ecred_connect_reject(chan);
					return;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
858
/* Map the channel type and requested security level to the HCI
 * authentication requirement to use for the ACL link.  May upgrade
 * BT_SECURITY_LOW to BT_SECURITY_SDP for SDP/3DSP PSMs as a side
 * effect.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		fallthrough;

	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
910
/* Service level security */
/* Request the security level @chan needs on its underlying link: SMP
 * for LE links, HCI authentication for BR/EDR.  Return value follows
 * smp_conn_security()/hci_conn_security().
 */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
925
/* Allocate the next signalling command identifier for @conn, cycling
 * through the kernel-reserved range 1-128 under conn->ident_lock.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	mutex_lock(&conn->ident_lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	mutex_unlock(&conn->ident_lock);

	return id;
}
947
/* Hand @skb to the ACL layer, or free it if the underlying hci_conn has
 * already been torn down.  Consumes @skb either way.
 */
static void l2cap_send_acl(struct l2cap_conn *conn, struct sk_buff *skb,
			   u8 flags)
{
	/* Check if the hcon still valid before attempting to send */
	if (hci_conn_valid(conn->hcon->hdev, conn->hcon))
		hci_send_acl(conn->hchan, skb, flags);
	else
		kfree_skb(skb);
}
957
/* Build and transmit an L2CAP signalling command on @conn.  Signalling
 * traffic is sent at the highest HCI priority and forces the link
 * active.  Silently drops the command on allocation failure.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	l2cap_send_acl(conn, skb, flags);
}
982
/* Transmit a data skb for @chan over its ACL link, selecting flushable
 * vs non-flushable start flags based on link type, controller
 * capability and the channel's FLAG_FLUSHABLE setting.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
1005
/* Decode a 16-bit ERTM enhanced control field into @control, zeroing
 * the fields that do not apply to the decoded frame type.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
1029
/* Decode a 32-bit ERTM extended control field into @control, zeroing
 * the fields that do not apply to the decoded frame type.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
1053
/* Parse the control field at the head of @skb into its control block
 * and pull it from the data, choosing the extended (32-bit) or
 * enhanced (16-bit) layout based on the channel's FLAG_EXT_CTRL.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
1067
__pack_extended_control(struct l2cap_ctrl * control)1068 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1069 {
1070 u32 packed;
1071
1072 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1073 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1074
1075 if (control->sframe) {
1076 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1077 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1078 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1079 } else {
1080 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1081 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1082 }
1083
1084 return packed;
1085 }
1086
__pack_enhanced_control(struct l2cap_ctrl * control)1087 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1088 {
1089 u16 packed;
1090
1091 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1092 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1093
1094 if (control->sframe) {
1095 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1096 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1097 packed |= L2CAP_CTRL_FRAME_TYPE;
1098 } else {
1099 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1100 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1101 }
1102
1103 return packed;
1104 }
1105
__pack_control(struct l2cap_chan * chan,struct l2cap_ctrl * control,struct sk_buff * skb)1106 static inline void __pack_control(struct l2cap_chan *chan,
1107 struct l2cap_ctrl *control,
1108 struct sk_buff *skb)
1109 {
1110 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1111 put_unaligned_le32(__pack_extended_control(control),
1112 skb->data + L2CAP_HDR_SIZE);
1113 } else {
1114 put_unaligned_le16(__pack_enhanced_control(control),
1115 skb->data + L2CAP_HDR_SIZE);
1116 }
1117 }
1118
__ertm_hdr_size(struct l2cap_chan * chan)1119 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1120 {
1121 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1122 return L2CAP_EXT_HDR_SIZE;
1123 else
1124 return L2CAP_ENH_HDR_SIZE;
1125 }
1126
/* Allocate and build a complete S-frame PDU for @chan.
 *
 * @control is the already-packed control field (16 or 32 bit depending
 * on FLAG_EXT_CTRL).  The skb carries the basic L2CAP header, the
 * control field and, if the channel uses CRC16, a trailing FCS computed
 * over everything before it.
 *
 * Returns the skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Basic header: len excludes the basic header itself */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS covers header + control, i.e. everything queued so far */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	/* S-frames carry flow-control state; send at highest priority */
	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1159
/* Build and transmit the supervisory frame described by @control.
 *
 * Updates the channel's ERTM bookkeeping as a side effect: may set the
 * final bit when an F-bit response is owed, tracks the RNR-sent state,
 * and for ack-bearing frames (everything but SREJ) records the
 * acknowledged sequence number and stops the ack timer.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A pending F-bit is carried by the next non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		/* This frame acknowledges everything up to reqseq */
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	/* Allocation failure is silently dropped; the peer's timers
	 * will recover the exchange.
	 */
	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1197
/* Send a Receiver Ready or, when the local side is busy, a Receiver
 * Not Ready S-frame acknowledging everything up to buffer_seq.
 * @poll requests an F-bit response from the peer.
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;
	control.super = test_bit(CONN_LOCAL_BUSY, &chan->conn_state) ?
			L2CAP_SUPER_RNR : L2CAP_SUPER_RR;
	control.reqseq = chan->buffer_seq;

	l2cap_send_sframe(chan, &control);
}
1216
__l2cap_no_conn_pending(struct l2cap_chan * chan)1217 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1218 {
1219 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1220 return true;
1221
1222 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1223 }
1224
l2cap_send_conn_req(struct l2cap_chan * chan)1225 void l2cap_send_conn_req(struct l2cap_chan *chan)
1226 {
1227 struct l2cap_conn *conn = chan->conn;
1228 struct l2cap_conn_req req;
1229
1230 req.scid = cpu_to_le16(chan->scid);
1231 req.psm = chan->psm;
1232
1233 chan->ident = l2cap_get_ident(conn);
1234
1235 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1236
1237 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1238 }
1239
l2cap_chan_ready(struct l2cap_chan * chan)1240 static void l2cap_chan_ready(struct l2cap_chan *chan)
1241 {
1242 /* The channel may have already been flagged as connected in
1243 * case of receiving data before the L2CAP info req/rsp
1244 * procedure is complete.
1245 */
1246 if (chan->state == BT_CONNECTED)
1247 return;
1248
1249 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1250 chan->conf_state = 0;
1251 __clear_chan_timer(chan);
1252
1253 switch (chan->mode) {
1254 case L2CAP_MODE_LE_FLOWCTL:
1255 case L2CAP_MODE_EXT_FLOWCTL:
1256 if (!chan->tx_credits)
1257 chan->ops->suspend(chan);
1258 break;
1259 }
1260
1261 chan->state = BT_CONNECTED;
1262
1263 chan->ops->ready(chan);
1264 }
1265
/* Send an LE credit-based connection request for @chan.
 *
 * Guarded by FLAG_LE_CONN_REQ_SENT so the request goes out only once.
 * Initializes the channel's LE flow-control state before advertising
 * MTU, MPS and initial rx credits to the peer.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Fall back to the connection MTU if the user didn't set one */
	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	l2cap_le_flowctl_init(chan, 0);

	memset(&req, 0, sizeof(req));
	req.psm = chan->psm;
	req.scid = cpu_to_le16(chan->scid);
	req.mtu = cpu_to_le16(chan->imtu);
	req.mps = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1291
/* Scratch state used while aggregating channels into a single
 * L2CAP_ECRED_CONN_REQ PDU.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req_hdr req;
		__le16 scid[5];	/* at most 5 source CIDs per request */
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel that initiated the request */
	struct pid *pid;		/* owner PID; only same-PID channels are merged */
	int count;			/* number of scid[] slots filled */
};
1301
/* Per-channel callback (via __l2cap_chan_list) that folds a deferred
 * channel into the pending ECRED connect request being built in @data.
 *
 * Only channels that were waiting with FLAG_DEFER_SETUP, share the
 * initiator's PID and PSM, use EXT_FLOWCTL mode, have no outstanding
 * ident and are in BT_CONNECT are included.  Each included channel is
 * given the initiator's ident so the single response matches them all.
 */
static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_conn_data *conn = data;
	struct pid *pid;

	/* Skip the initiating channel itself */
	if (chan == conn->chan)
		return;

	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	pid = chan->ops->get_peer_pid(chan);

	/* Only add deferred channels with the same PID/PSM */
	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
		return;

	/* Each channel's request is sent at most once */
	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	/* Set the same ident so we can match on the rsp */
	chan->ident = conn->chan->ident;

	/* Include all channels deferred */
	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);

	conn->count++;
}
1333
/* Send an enhanced credit-based (ECRED) connection request for @chan,
 * batching in any compatible channels that deferred their setup so one
 * request can open up to five channels at once.
 */
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_conn_data data;

	/* Deferred channels are picked up later by the initiator's
	 * l2cap_ecred_defer_connect() walk.
	 */
	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	memset(&data, 0, sizeof(data));
	data.pdu.req.psm = chan->psm;
	data.pdu.req.mtu = cpu_to_le16(chan->imtu);
	data.pdu.req.mps = cpu_to_le16(chan->mps);
	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.scid[0] = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	data.count = 1;
	data.chan = chan;
	data.pid = chan->ops->get_peer_pid(chan);

	/* Pull in compatible deferred channels; this grows data.count */
	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);

	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
		       &data.pdu);
}
1366
l2cap_le_start(struct l2cap_chan * chan)1367 static void l2cap_le_start(struct l2cap_chan *chan)
1368 {
1369 struct l2cap_conn *conn = chan->conn;
1370
1371 if (!smp_conn_security(conn->hcon, chan->sec_level))
1372 return;
1373
1374 if (!chan->psm) {
1375 l2cap_chan_ready(chan);
1376 return;
1377 }
1378
1379 if (chan->state == BT_CONNECT) {
1380 if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1381 l2cap_ecred_connect(chan);
1382 else
1383 l2cap_le_connect(chan);
1384 }
1385 }
1386
l2cap_start_connection(struct l2cap_chan * chan)1387 static void l2cap_start_connection(struct l2cap_chan *chan)
1388 {
1389 if (chan->conn->hcon->type == LE_LINK) {
1390 l2cap_le_start(chan);
1391 } else {
1392 l2cap_send_conn_req(chan);
1393 }
1394 }
1395
l2cap_request_info(struct l2cap_conn * conn)1396 static void l2cap_request_info(struct l2cap_conn *conn)
1397 {
1398 struct l2cap_info_req req;
1399
1400 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1401 return;
1402
1403 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1404
1405 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1406 conn->info_ident = l2cap_get_ident(conn);
1407
1408 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1409
1410 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1411 sizeof(req), &req);
1412 }
1413
l2cap_check_enc_key_size(struct hci_conn * hcon)1414 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1415 {
1416 /* The minimum encryption key size needs to be enforced by the
1417 * host stack before establishing any L2CAP connections. The
1418 * specification in theory allows a minimum of 1, but to align
1419 * BR/EDR and LE transports, a minimum of 7 is chosen.
1420 *
1421 * This check might also be called for unencrypted connections
1422 * that have no key size requirements. Ensure that the link is
1423 * actually encrypted before enforcing a key size.
1424 */
1425 int min_key_size = hcon->hdev->min_enc_key_size;
1426
1427 /* On FIPS security level, key size must be 16 bytes */
1428 if (hcon->sec_level == BT_SECURITY_FIPS)
1429 min_key_size = 16;
1430
1431 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1432 hcon->enc_key_size >= min_key_size);
1433 }
1434
/* Drive channel setup as far as the current connection state allows.
 *
 * LE links go through the LE/ECRED path directly.  On BR/EDR the
 * information request/response exchange must finish first; once it
 * has, the connect request is sent only if security is satisfied, no
 * connect is already pending and the encryption key is long enough —
 * otherwise the channel is put on the disconnect timer.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* Kick off the feature exchange; setup resumes when it's done */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	if (l2cap_check_enc_key_size(conn->hcon))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
1461
/* Non-zero when @mode is supported both locally and by the remote
 * feature mask; basic/other modes report 0.
 */
static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
{
	u32 local_feat_mask = l2cap_feat_mask;
	u32 mode_feat;

	switch (mode) {
	case L2CAP_MODE_ERTM:
		mode_feat = L2CAP_FEAT_ERTM;
		break;
	case L2CAP_MODE_STREAMING:
		mode_feat = L2CAP_FEAT_STREAMING;
		break;
	default:
		return 0x00;
	}

	if (!disable_ertm)
		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;

	return mode_feat & feat_mask & local_feat_mask;
}
1477
/* Send a disconnect request for @chan and move it to BT_DISCONN,
 * recording @err as the reason reported to the channel's owner.
 * For connected ERTM channels all retransmission-related timers are
 * stopped first.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1499
1500 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and advance its connection setup.
 *
 * Connectionless/fixed channels are simply marked ready.  Channels in
 * BT_CONNECT get a connect request sent once security allows it (or
 * are closed when the negotiated mode is unsupported or the encryption
 * key is too short).  Channels in BT_CONNECT2 are answered with a
 * connect response and, on success, our first configure request.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close a state-2 device whose configured mode the
			 * remote cannot support.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Wait for userspace authorization */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				/* Security still in progress */
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Configure only after a successful response, and
			 * only once.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}
}
1576
/* LE link became ready: start security for outgoing pairing and, as
 * peripheral, request a connection parameter update if the current
 * interval is outside the configured bounds.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1609
/* The underlying HCI link is fully established.  Starts the BR/EDR
 * feature exchange, walks all existing channels to advance their
 * setup, and finally releases RX packets that were queued while the
 * connection was still coming up.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed channels wait for the feature exchange */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process packets that arrived before the connection was ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1645
/* Notify sockets that we cannot guarantee reliability anymore */
l2cap_conn_unreliable(struct l2cap_conn * conn,int err)1647 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1648 {
1649 struct l2cap_chan *chan;
1650
1651 BT_DBG("conn %p", conn);
1652
1653 list_for_each_entry(chan, &conn->chan_l, list) {
1654 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1655 l2cap_chan_set_err(chan, err);
1656 }
1657 }
1658
/* Info request timer expired: give up waiting for the peer's feature
 * mask, mark the exchange as done and let pending channels proceed.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	mutex_lock(&conn->lock);
	l2cap_conn_start(conn);
	mutex_unlock(&conn->lock);
}
1671
1672 /*
1673 * l2cap_user
1674 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1675 * callback is called during registration. The ->remove callback is called
1676 * during unregistration.
 * An l2cap_user object is unregistered either explicitly or when the
 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
 * l2cap->hchan, etc. are valid as long as the remove callback hasn't been
 * called.
1680 * External modules must own a reference to the l2cap_conn object if they intend
1681 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1682 * any time if they don't.
1683 */
1684
/* Register @user on @conn and invoke its ->probe callback.
 *
 * Returns 0 on success, -EINVAL if @user is already registered, -ENODEV
 * if the connection has already been torn down, or the error returned
 * by ->probe.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* Reject double registration */
	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1722
l2cap_unregister_user(struct l2cap_conn * conn,struct l2cap_user * user)1723 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1724 {
1725 struct hci_dev *hdev = conn->hcon->hdev;
1726
1727 hci_dev_lock(hdev);
1728
1729 if (list_empty(&user->list))
1730 goto out_unlock;
1731
1732 list_del_init(&user->list);
1733 user->remove(conn, user);
1734
1735 out_unlock:
1736 hci_dev_unlock(hdev);
1737 }
1738 EXPORT_SYMBOL(l2cap_unregister_user);
1739
/* Detach and notify every registered l2cap_user of @conn.  Entries are
 * popped one at a time so the list stays consistent even if a ->remove
 * callback modifies it.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del_init(&user->list);
		user->remove(conn, user);
	}
}
1750
/* Tear down the L2CAP state of @hcon: flush pending RX, unregister
 * users, close every channel with error @err, release the HCI channel
 * and finally drop the connection reference.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	mutex_lock(&conn->lock);

	/* Drop any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	cancel_delayed_work_sync(&conn->id_addr_timer);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	/* Kill channels: hold each channel across the close so the
	 * callbacks cannot free it underneath us.
	 */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hci_chan_del(conn->hchan);
	conn->hchan = NULL;

	hcon->l2cap_data = NULL;
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
}
1804
/* kref release callback: drop the hci_conn reference and free @conn. */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1812
/* Take a reference on @conn; returns @conn for call chaining. */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
1819
/* Drop a reference on @conn; frees it when the last reference goes. */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1825
1826 /* ---- Socket interface ---- */
1827
1828 /* Find socket with psm and source / destination bdaddr.
1829 * Returns closest match.
1830 */
/* Look up a global channel matching @psm, @state (0 = any), @link_type
 * and the given addresses.  An exact src/dst match wins immediately;
 * otherwise the closest wildcard (BDADDR_ANY) match is returned.  The
 * returned channel carries a reference the caller must put.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* The channel's address type must fit the link transport */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				/* Skip channels already being freed */
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1881
l2cap_monitor_timeout(struct work_struct * work)1882 static void l2cap_monitor_timeout(struct work_struct *work)
1883 {
1884 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1885 monitor_timer.work);
1886
1887 BT_DBG("chan %p", chan);
1888
1889 l2cap_chan_lock(chan);
1890
1891 if (!chan->conn) {
1892 l2cap_chan_unlock(chan);
1893 l2cap_chan_put(chan);
1894 return;
1895 }
1896
1897 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1898
1899 l2cap_chan_unlock(chan);
1900 l2cap_chan_put(chan);
1901 }
1902
l2cap_retrans_timeout(struct work_struct * work)1903 static void l2cap_retrans_timeout(struct work_struct *work)
1904 {
1905 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1906 retrans_timer.work);
1907
1908 BT_DBG("chan %p", chan);
1909
1910 l2cap_chan_lock(chan);
1911
1912 if (!chan->conn) {
1913 l2cap_chan_unlock(chan);
1914 l2cap_chan_put(chan);
1915 return;
1916 }
1917
1918 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1919 l2cap_chan_unlock(chan);
1920 l2cap_chan_put(chan);
1921 }
1922
/* Transmit @skbs on a streaming-mode channel.
 *
 * Each frame gets the next tx sequence number, a packed control field
 * and an optional FCS.  Streaming mode has no retransmission, so every
 * frame leaves the queue immediately.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Streaming frames never acknowledge anything */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1958
/* Transmit as many queued I-frames as the ERTM state allows.
 *
 * Stops when the tx window fills, the send queue drains or the tx
 * state machine leaves XMIT.  Each frame is cloned before sending so
 * the original stays in tx_q for possible retransmission.
 *
 * Returns the number of frames sent, 0 when the remote is busy, or
 * -ENOTCONN if the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Piggy-back a pending F-bit on this I-frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame also acknowledges up to buffer_seq */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance the send pointer; NULL means queue exhausted */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2025
/* Retransmit every sequence number currently on the retrans_list.
 *
 * Each frame's control field is rebuilt with the current reqseq (and
 * the F-bit if one is owed), its FCS is recomputed, and a fresh
 * copy/clone is sent.  Exceeding the channel's max_tx retry limit
 * triggers a disconnect.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		/* max_tx == 0 means unlimited retries */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the ack in the retransmitted frame */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
					   L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2103
/* Queue the single frame named by control->reqseq for retransmission
 * and send it.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2112
/* Rebuild the retransmission list with every unacked frame starting at
 * control->reqseq and resend them, unless the remote is busy.  A poll
 * in @control means the next outgoing frame must carry the F-bit.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Start from a clean list */
	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame at or after reqseq */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything up to (not including) tx_send_head */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2146
/* Acknowledge received I-frames.  Sends RNR when locally busy, otherwise
 * tries to piggyback the ack on outgoing I-frames and only emits an
 * explicit RR once the unacked window is 3/4 full; below that threshold
 * the ack is deferred via the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Local busy: tell the peer to stop sending (RNR). */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Below threshold: defer the ack to the ack timer. */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2196
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the linear part, the remainder into MTU-sized skbs
 * chained on skb's frag_list.  Returns total bytes copied, -EFAULT on a
 * failed copy, or the alloc_skb error.  On error the partially built
 * chain stays attached to @skb; the caller frees it via kfree_skb(skb).
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
					 &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len -= count;

		/* Keep the head skb's accounting in sync with the chain so
		 * skb->len reflects the whole PDU.
		 */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2240
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM,
 * followed by the user payload copied from @msg.  Returns the skb or an
 * ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* Linear part is capped by the HCI MTU; the rest is fragmented by
	 * l2cap_skbuff_fromiovec().
	 */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2272
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user payload copied from @msg.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_hdr *hdr;
	struct sk_buff *pdu;
	int linear_len;
	int err;

	BT_DBG("chan %p len %zu", chan, len);

	/* The linear part holds at most one HCI fragment worth of payload;
	 * anything beyond that ends up on the frag_list.
	 */
	linear_len = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	pdu = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, linear_len,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(pdu))
		return pdu;

	/* Fill in the L2CAP header. */
	hdr = skb_put(pdu, L2CAP_HDR_SIZE);
	hdr->cid = cpu_to_le16(chan->dcid);
	hdr->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, linear_len, pdu);
	if (unlikely(err < 0)) {
		kfree_skb(pdu);
		return ERR_PTR(err);
	}

	return pdu;
}
2302
/* Build an ERTM/streaming I-frame PDU.  Reserves room for the (enhanced
 * or extended) control field, the optional SDU length (@sdulen != 0 for
 * the first segment of a segmented SDU), and the FCS if enabled.  The
 * control field is zeroed here and filled in at transmit time.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* hlen includes the basic header; lh->len must not. */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2356
/* Segment an outgoing SDU into ERTM/streaming I-frame PDUs on
 * @seg_queue, tagging each with its SAR value (UNSEGMENTED, or
 * START/CONTINUE/END).  Returns 0 or a negative error; on error the
 * queue is purged.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no SDU-length field needed. */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First segment carries the total SDU length. */
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the first segment carries the SDU length. */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2422
/* Build an LE flow-control (K-frame) PDU.  @sdulen != 0 marks the first
 * segment of a segmented SDU and prepends the 2-byte SDU length field.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* Payload length includes the SDU length field, if present. */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2465
/* Segment an outgoing SDU into LE flow-control PDUs on @seg_queue.  The
 * first PDU carries the SDU length and therefore has room for
 * L2CAP_SDULEN_SIZE fewer payload bytes than the rest.  Returns 0 or a
 * negative error; on error the queue is purged.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	/* First PDU: payload budget minus the SDU length field. */
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* Continuation PDUs omit the SDU length field, so
			 * they can carry the full remote MPS.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2501
l2cap_le_flowctl_send(struct l2cap_chan * chan)2502 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2503 {
2504 int sent = 0;
2505
2506 BT_DBG("chan %p", chan);
2507
2508 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2509 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2510 chan->tx_credits--;
2511 sent++;
2512 }
2513
2514 BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2515 skb_queue_len(&chan->tx_q));
2516 }
2517
/* Set up TX timestamping on @skb.  Stream sockets account the full
 * payload length; all other socket types are counted as a single unit.
 */
static void l2cap_tx_timestamp(struct sk_buff *skb,
			       const struct sockcm_cookie *sockc,
			       size_t len)
{
	struct sock *sk = skb ? skb->sk : NULL;
	size_t units = (sk && sk->sk_type == SOCK_STREAM) ? len : 1;

	hci_setup_tx_timestamp(skb, units, sockc);
}
2529
/* Set up TX timestamping for a segmented SDU.  For stream sockets the
 * timestamp goes on the last segment (completion of the whole SDU);
 * otherwise it goes on the first.
 */
static void l2cap_tx_timestamp_seg(struct sk_buff_head *queue,
				   const struct sockcm_cookie *sockc,
				   size_t len)
{
	struct sk_buff *first = skb_peek(queue);
	struct sock *sk = first ? first->sk : NULL;
	struct sk_buff *target;

	if (sk && sk->sk_type == SOCK_STREAM)
		target = skb_peek_tail(queue);
	else
		target = first;

	l2cap_tx_timestamp(target, sockc, len);
}
2542
/* Send @len bytes from @msg on @chan, dispatching on channel type and
 * mode (connectionless, LE/extended flow control, basic, ERTM,
 * streaming).  Returns the number of bytes accepted or a negative
 * error.  Caller is expected to hold the channel lock.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    const struct sockcm_cookie *sockc)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_tx_timestamp(skb, sockc, len);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have been torn down while segmenting (the
		 * lock can be dropped waiting for memory); drop the
		 * segments and fail in that case.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		l2cap_tx_timestamp_seg(&seg_queue, sockc, len);

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: ask the socket layer to stop feeding us. */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_tx_timestamp(skb, sockc, len);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM) {
			/* TODO: ERTM mode timestamping */
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		} else {
			l2cap_tx_timestamp_seg(&seg_queue, sockc, len);
			l2cap_streaming_send(chan, &seg_queue);
		}

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2656
/* Send SREJ S-frames for every missing sequence number between the
 * expected txseq and the one actually received (@txseq), skipping
 * frames already buffered out-of-order in srej_q.  Each requested
 * sequence is tracked on srej_list.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Don't re-request frames we already hold out of order. */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2679
l2cap_send_srej_tail(struct l2cap_chan * chan)2680 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2681 {
2682 struct l2cap_ctrl control;
2683
2684 BT_DBG("chan %p", chan);
2685
2686 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2687 return;
2688
2689 memset(&control, 0, sizeof(control));
2690 control.sframe = 1;
2691 control.super = L2CAP_SUPER_SREJ;
2692 control.reqseq = chan->srej_list.tail;
2693 l2cap_send_sframe(chan, &control);
2694 }
2695
/* Re-send SREJs for all outstanding missing frames except @txseq (the
 * one just received).  Each entry is popped and re-appended, rotating
 * the list; initial_head bounds the walk to exactly one pass.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		/* Stop at the satisfied sequence or an empty list. */
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Still missing: keep it on the list for the next round. */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2721
/* Process an acknowledgement up to (not including) @reqseq: free every
 * acked frame from the TX queue, advance expected_ack_seq, and stop the
 * retransmission timer once nothing is outstanding.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack adds no new information. */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2753
/* Abort SREJ_SENT recovery: drop all pending selective-reject state and
 * out-of-order frames, rewind the expected sequence to the last
 * in-order point, and return the RX state machine to RECV.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2763
/* ERTM TX state machine, XMIT state: normal transmission.  Handles data
 * requests, local-busy transitions, acks, and poll/retransmission
 * timeouts (the latter two move the channel to WAIT_F).
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Remember where unsent data begins before splicing. */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* l2cap_send_ack() emits RNR while locally busy. */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the peer to stop; poll it (RR with P=1)
			 * to resynchronize, then wait for the F-bit.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the peer and await F-bit. */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2835
/* ERTM TX state machine, WAIT_F state: a poll (P=1) is outstanding and
 * we are waiting for the peer's final (F=1) response.  Data is queued
 * but not sent; the monitor timer drives re-polls up to max_tx before
 * the connection is dropped.
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* l2cap_send_ack() emits RNR while locally busy. */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* Re-poll (RR with P=1) after having sent RNR so the
			 * peer knows we can receive again.
			 */
			struct l2cap_ctrl local_control;
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		fallthrough;

	case L2CAP_EV_RECV_FBIT:
		if (control && control->final) {
			/* F-bit received: poll answered, back to XMIT. */
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			/* Fix malformed format string (was "0x2.2%x"). */
			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
		}
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_EV_MONITOR_TO:
		/* No response to our poll: retry until max_tx is exhausted
		 * (max_tx == 0 means retry forever), then give up.
		 */
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
			l2cap_send_disconn_req(chan, ECONNABORTED);
		}
		break;
	default:
		break;
	}
}
2912
/* Dispatch an ERTM TX event to the handler for the channel's current TX
 * state.  Events arriving in any other state are ignored.
 */
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
	       chan, control, skbs, event, chan->tx_state);

	if (chan->tx_state == L2CAP_TX_STATE_XMIT)
		l2cap_tx_state_xmit(chan, control, skbs, event);
	else if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
		l2cap_tx_state_wait_f(chan, control, skbs, event);
	/* Any other state: drop the event. */
}
2931
/* Feed a received reqseq/F-bit into the TX state machine so acked
 * frames are released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2938
/* Feed only a received F-bit into the TX state machine (no reqseq
 * processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2945
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* NOTE(review): iterates conn->chan_l without taking a lock here;
	 * presumably the caller holds the required channel-list lock —
	 * confirm against the call sites.
	 */
	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->l2cap.chan == chan)
			continue;

		/* Clone per recipient; skip this one on allocation failure. */
		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;
		/* recv() takes ownership on success; free on rejection. */
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}
}
2969
2970 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header + command header +
 * @dlen bytes of @data, fragmenting the payload across frag_list skbs
 * when it exceeds the HCI MTU.  The signalling CID is chosen by link
 * type.  Returns NULL on allocation failure or an undersized MTU.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	/* Both headers must fit in the first fragment. */
	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Whatever payload fits after the headers goes in the
		 * first fragment.
		 */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* kfree_skb also frees any fragments already chained. */
	kfree_skb(skb);
	return NULL;
}
3036
/* Parse one configuration option at *ptr, returning its total size and
 * advancing *ptr past it.  Values of 1/2/4 bytes are decoded
 * little-endian into *val; any other length returns a pointer to the
 * raw value.  NOTE(review): opt->len comes from the remote peer and is
 * not bounds-checked here — callers must ensure the option fits in the
 * remaining request buffer.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer to the data. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3070
/* Append one configuration option at *ptr and advance *ptr past it.
 * 1/2/4-byte values are encoded little-endian; other lengths treat @val
 * as a pointer to raw bytes.  If fewer than @size bytes remain, the
 * option is silently dropped.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	/* Not enough room left in the output buffer: skip this option. */
	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: @val is a pointer to the bytes. */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3103
/* Append an Extended Flow Specification option describing this
 * channel's service.  ERTM channels use the locally configured service
 * parameters; streaming channels advertise best-effort defaults.  Other
 * modes add nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs, size);
}
3134
/* Deferred-ack work: if any received frames are still unacknowledged
 * when the ack timer fires, send an RR/RNR now.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	/* Drop the channel reference — presumably taken when this work was
	 * scheduled (see the timer setup code).
	 */
	l2cap_chan_put(chan);
}
3154
/* Reset per-channel transmit/receive bookkeeping for a (re)configured
 * channel.  For ERTM mode, additionally initialize the state machines
 * and the SREJ/retransmission sequence lists.  Returns 0 or a negative
 * error from sequence-list allocation.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Everything below is ERTM-only state. */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Unwind the first list so no memory leaks on failure. */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3190
/* Keep the requested ERTM/streaming mode when the remote supports it;
 * fall back to basic mode otherwise.
 */
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
{
	if ((mode == L2CAP_MODE_STREAMING || mode == L2CAP_MODE_ERTM) &&
	    l2cap_mode_supported(mode, remote_feat_mask))
		return mode;

	return L2CAP_MODE_BASIC;
}
3203
/* True if the remote advertised Extended Window Size support. */
static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
{
	return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW);
}
3208
/* True if the remote advertised Extended Flow Specification support. */
static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
{
	return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW);
}
3213
/* Fill the RFC option's retransmission and monitor timeouts with the
 * spec defaults (little-endian on the wire).
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
}
3220
/* Choose the TX window and control-field format.  A window larger than
 * the enhanced default requires (and enables) the extended control
 * field when the remote supports extended windows; otherwise the window
 * is clamped to the enhanced-control maximum.
 */
static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
{
	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
	    __l2cap_ews_supported(chan->conn)) {
		/* use extended control field */
		set_bit(FLAG_EXT_CTRL, &chan->flags);
		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
	} else {
		chan->tx_win = min_t(u16, chan->tx_win,
				     L2CAP_DEFAULT_TX_WINDOW);
		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	}
	chan->ack_win = chan->tx_win;
}
3235
l2cap_mtu_auto(struct l2cap_chan * chan)3236 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3237 {
3238 struct hci_conn *conn = chan->conn->hcon;
3239
3240 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3241
3242 /* The 2-DH1 packet has between 2 and 56 information bytes
3243 * (including the 2-byte payload header)
3244 */
3245 if (!(conn->pkt_type & HCI_2DH1))
3246 chan->imtu = 54;
3247
3248 /* The 3-DH1 packet has between 2 and 85 information bytes
3249 * (including the 2-byte payload header)
3250 */
3251 if (!(conn->pkt_type & HCI_3DH1))
3252 chan->imtu = 83;
3253
3254 /* The 2-DH3 packet has between 2 and 369 information bytes
3255 * (including the 2-byte payload header)
3256 */
3257 if (!(conn->pkt_type & HCI_2DH3))
3258 chan->imtu = 367;
3259
3260 /* The 3-DH3 packet has between 2 and 554 information bytes
3261 * (including the 2-byte payload header)
3262 */
3263 if (!(conn->pkt_type & HCI_3DH3))
3264 chan->imtu = 552;
3265
3266 /* The 2-DH5 packet has between 2 and 681 information bytes
3267 * (including the 2-byte payload header)
3268 */
3269 if (!(conn->pkt_type & HCI_2DH5))
3270 chan->imtu = 679;
3271
3272 /* The 3-DH5 packet has between 2 and 1023 information bytes
3273 * (including the 2-byte payload header)
3274 */
3275 if (!(conn->pkt_type & HCI_3DH5))
3276 chan->imtu = 1021;
3277 }
3278
/* Build a Configuration Request for @chan into @data (at most @data_size
 * bytes) and return its length.
 *
 * On the very first exchange (no conf req/rsp seen yet) the channel mode
 * may still be downgraded to what the remote's feature mask supports.
 * Then the MTU, RFC, EFS, EWS and FCS options are appended as applicable
 * for the selected mode.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Mode was fixed by the device/user: keep it as-is */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		/* Fall back to the best mode the remote supports */
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		/* imtu == 0 means "derive from ACL packet types" */
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only advertise basic mode explicitly when the remote
		 * supports at least one of the richer modes.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* MPS must fit in the ACL MTU with headers and FCS */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		/* With extended control the full window goes in EWS */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3404
/* Parse the accumulated Configuration Request (chan->conf_req, filled by
 * l2cap_config_req) and build the Configuration Response into @data (at
 * most @data_size bytes).  Returns the response length, or -ECONNREFUSED
 * when the request is unacceptable and the channel must be torn down.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: walk every option the remote sent.  Options with an
	 * unexpected length are silently skipped.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Extended window size is not accepted here */
			return -ECONNREFUSED;

		default:
			if (hint)
				break;
			/* Echo unknown non-hint options back as rejected */
			result = L2CAP_CONF_UNKNOWN;
			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* Mode is fixed by the device: the remote must match it */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Only one renegotiation attempt is allowed */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote MPS to what fits our ACL MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (remote_efs &&
			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0);

	return ptr - data;
}
3624
/* Parse a Configuration Response (@rsp, @len bytes) from the remote and
 * build the follow-up Configuration Request into @data (at most @size
 * bytes), adopting the values the remote proposed where acceptable.
 * *@result may be downgraded to L2CAP_CONF_UNACCEPT on a too-small MTU.
 * Returns the new request length, or -ECONNREFUSED on a fatal mismatch.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* A device-fixed mode cannot be renegotiated */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			/* Service type must be compatible with ours */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* Basic mode cannot be switched away from by the remote */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3742
/* Build a minimal Configuration Response (header only, no options) into
 * @data and return its length.
 */
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
				u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *tail = rsp->data;

	BT_DBG("chan %p", chan);

	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(flags);

	return tail - data;
}
3757
__l2cap_le_connect_rsp_defer(struct l2cap_chan * chan)3758 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3759 {
3760 struct l2cap_le_conn_rsp rsp;
3761 struct l2cap_conn *conn = chan->conn;
3762
3763 BT_DBG("chan %p", chan);
3764
3765 rsp.dcid = cpu_to_le16(chan->scid);
3766 rsp.mtu = cpu_to_le16(chan->imtu);
3767 rsp.mps = cpu_to_le16(chan->mps);
3768 rsp.credits = cpu_to_le16(chan->rx_credits);
3769 rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3770
3771 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3772 &rsp);
3773 }
3774
/* Channel-list callback: count channels still awaiting accept in *data,
 * or set it to -ECONNREFUSED when any channel has been refused.
 */
static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
{
	int *result = data;

	/* Stop once refused; skip outgoing channels entirely */
	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	if (chan->state == BT_CONNECT2) {
		/* If channel still pending accept add to result */
		(*result)++;
	} else if (chan->state != BT_CONNECTED) {
		/* If not connected or pending accept it has been refused */
		*result = -ECONNREFUSED;
	}
}
3795
/* Scratch buffer for assembling a deferred Enhanced Credit Based
 * Connection Response that covers several channels at once.
 */
struct l2cap_ecred_rsp_data {
	struct {
		struct l2cap_ecred_conn_rsp_hdr rsp;
		__le16 scid[L2CAP_ECRED_MAX_CID]; /* DCIDs following the hdr */
	} __packed pdu;
	int count; /* number of scid[] slots filled so far */
};
3803
/* Channel-list callback: add one deferred channel to the Enhanced Credit
 * Based Connection Response being built in @data (struct
 * l2cap_ecred_rsp_data), or delete it when the shared result is an error.
 */
static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_rsp_data *rsp = data;
	/* View the packed hdr+scid[] pair through the flexible-array
	 * response type so dcid[] indexing lands on rsp->pdu.scid[].
	 */
	struct l2cap_ecred_conn_rsp *rsp_flex =
		container_of(&rsp->pdu.rsp, struct l2cap_ecred_conn_rsp, hdr);

	/* Check if channel for outgoing connection or if it wasn't deferred
	 * since in those cases it must be skipped.
	 */
	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags) ||
	    !test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	/* Reset ident so only one response is sent */
	chan->ident = 0;

	/* Include all channels pending with the same ident */
	if (!rsp->pdu.rsp.result)
		rsp_flex->dcid[rsp->count++] = cpu_to_le16(chan->scid);
	else
		l2cap_chan_del(chan, ECONNRESET);
}
3826
/* Send the deferred Enhanced Credit Based Connection Response once every
 * channel created by the same request (same ident) has been accepted or
 * refused.  While any sibling channel is still pending nothing is sent.
 */
void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_rsp_data data;
	u16 id = chan->ident;
	int result = 0;

	if (!id)
		return;

	BT_DBG("chan %p id %d", chan, id);

	memset(&data, 0, sizeof(data));

	data.pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.rsp.mps     = cpu_to_le16(chan->mps);
	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);

	/* Verify that all channels are ready */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);

	/* result > 0: some channel is still pending accept — wait */
	if (result > 0)
		return;

	/* result < 0: at least one channel was refused */
	if (result < 0)
		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);

	/* Build response */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);

	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
		       &data.pdu);
}
3862
__l2cap_connect_rsp_defer(struct l2cap_chan * chan)3863 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3864 {
3865 struct l2cap_conn_rsp rsp;
3866 struct l2cap_conn *conn = chan->conn;
3867 u8 buf[128];
3868 u8 rsp_code;
3869
3870 rsp.scid = cpu_to_le16(chan->dcid);
3871 rsp.dcid = cpu_to_le16(chan->scid);
3872 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3873 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3874 rsp_code = L2CAP_CONN_RSP;
3875
3876 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3877
3878 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3879
3880 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3881 return;
3882
3883 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3884 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3885 chan->num_conf_req++;
3886 }
3887
/* Extract the agreed ERTM/streaming parameters (timeouts, MPS, ack
 * window) from a successful Configuration Response @rsp of @len bytes.
 * No-op for basic-mode channels.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	/* Pick up RFC and EWS options if present; defaults above apply
	 * otherwise.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* Extended control uses the EWS window, else the RFC one */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3943
/* Handle a Command Reject.  If it rejects our outstanding Information
 * Request (feature mask probe), give up on it and start connections
 * anyway.  Returns 0, or -EPROTO when the PDU is too short.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
		return 0;

	/* Only react when it matches our pending feature-mask request */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) ||
	    cmd->ident != conn->info_ident)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);

	return 0;
}
3968
/* Handle an incoming L2CAP Connection Request: look up a listening
 * channel for the PSM, run security and CID validity checks, create and
 * register the new channel, then send the Connection Response with
 * @rsp_code (and, when the feature mask is still unknown, an
 * Information Request).
 */
static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
			  u8 *data, u8 rsp_code)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			/* Security OK: either defer to userspace or go
			 * straight to configuration.
			 */
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not known yet: answer pending */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	if (!pchan)
		return;

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Kick off the feature-mask Information Request */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);
}
4098
/* Validate the length of an incoming Connection Request PDU and hand it
 * to l2cap_connect().  Returns 0, or -EPROTO when the PDU is too short.
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	if (cmd_len < sizeof(struct l2cap_conn_req))
		err = -EPROTO;
	else
		l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP);

	return err;
}
4108
/* Handle a Connection Response from the remote: on success move the
 * channel to BT_CONFIG and send the first Configuration Request, on
 * pending just mark the channel, otherwise tear it down.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	/* A successful response must carry a dynamic destination CID */
	if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	/* Find our channel either by its source CID or, when the remote
	 * left scid zero, by the command identifier.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EBADSLT;
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EBADSLT;
	}

	/* Take a reference before locking; bail if it is going away */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Refuse a dcid that is already in use */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return err;
}
4186
set_default_fcs(struct l2cap_chan * chan)4187 static inline void set_default_fcs(struct l2cap_chan *chan)
4188 {
4189 /* FCS is enabled only in ERTM or streaming mode, if one or both
4190 * sides request it.
4191 */
4192 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4193 chan->fcs = L2CAP_FCS_NONE;
4194 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4195 chan->fcs = L2CAP_FCS_CRC16;
4196 }
4197
/* Send a successful Configuration Response for an EFS channel, marking
 * the local side's output configuration complete.
 */
static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
{
	struct l2cap_conn *conn = chan->conn;
	int rsp_len;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
	       flags);

	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	rsp_len = l2cap_build_conf_rsp(chan, data, L2CAP_CONF_SUCCESS, flags);
	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP, rsp_len, data);
}
4213
/* Send a Command Reject with reason "invalid CID", echoing the offending
 * source/destination CID pair.
 */
static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
				   u16 scid, u16 dcid)
{
	struct l2cap_cmd_rej_cid rej = {
		.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID),
		.scid   = __cpu_to_le16(scid),
		.dcid   = __cpu_to_le16(dcid),
	};

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
4225
/* Handle an incoming Configuration Request.  Fragments (continuation
 * flag set) are accumulated in chan->conf_req; once the request is
 * complete it is parsed, a response is sent and, when both directions
 * are configured, the channel is made ready.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only legal in these states */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
		chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: bring the channel up */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
	}

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4334
/* Handle an incoming L2CAP Configure Response on a BR/EDR channel.
 *
 * Depending on the result code this either accepts the remote
 * parameters, keeps negotiating (pending / unacceptable parameters),
 * or gives up and disconnects the channel.  Returns 0, or -EPROTO if
 * the command is shorter than the response header.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);	/* length of the option data */
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Lookup returns the channel locked and with a reference held;
	 * both are released at "done".
	 */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		/* Remote accepted our request: record the negotiated RFC
		 * option and clear any remote-pending state.
		 */
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		/* Retry with adjusted options, but only a bounded number
		 * of times.
		 */
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			/* Make sure the rebuilt request cannot overflow the
			 * stack buffer.
			 */
			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		/* Unrecoverable: fail the channel and disconnect. */
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More option data follows in another response PDU. */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finish channel setup. */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4441
/* Handle an incoming L2CAP Disconnection Request: acknowledge it and
 * tear down the addressed channel.  Returns 0, or -EPROTO if the
 * command payload has the wrong size.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid; the lookup returns the channel
	 * locked and with a reference held.
	 */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		/* Unknown CID: reject so the peer can clean up. */
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	/* Echo the CIDs back from our point of view before closing. */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	l2cap_chan_del(chan, ECONNRESET);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4480
/* Handle an incoming L2CAP Disconnection Response: finish tearing
 * down a channel for which we previously sent a Disconnection
 * Request.  Returns 0, or -EPROTO on a malformed command.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	struct l2cap_chan *chan;
	u16 dcid, scid;

	/* A Disconnection Response carries exactly the two CIDs. */
	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	scid = __le16_to_cpu(rsp->scid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* Lookup returns the channel locked and with a reference held. */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	/* Only tear the channel down if we actually asked for the
	 * disconnection; ignore stray responses in any other state.
	 */
	if (chan->state == BT_DISCONN) {
		l2cap_chan_del(chan, 0);
		chan->ops->close(chan);
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4517
/* Handle an incoming L2CAP Information Request and answer with our
 * feature mask, our fixed-channel map, or "not supported".
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	switch (type) {
	case L2CAP_IT_FEAT_MASK: {
		u8 buf[8];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		u32 feat_mask = l2cap_feat_mask;

		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);

		/* Advertise ERTM/streaming/FCS support unless ERTM was
		 * disabled via the module parameter.
		 */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING |
				     L2CAP_FEAT_FCS;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
		break;
	}
	case L2CAP_IT_FIXED_CHAN: {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* First octet is our fixed-channel bitmap, rest is zero. */
		rsp->data[0] = conn->local_fixed_chan;
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
		break;
	}
	default: {
		struct l2cap_info_rsp rsp;

		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
		break;
	}
	}

	return 0;
}
4565
/* Handle an incoming L2CAP Information Response during connection
 * setup.  A feature-mask response may trigger a follow-up request for
 * the fixed-channel map; once the exchange completes (or fails),
 * pending channels are started via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer cannot answer: give up on the exchange and start
		 * the pending channels anyway.
		 */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels: ask for its map
			 * before starting the pending channels.
			 */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		/* First data octet is the remote fixed-channel bitmap. */
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4628
/* Handle an LE Connection Parameter Update Request from the
 * peripheral: validate the parameters, answer accepted/rejected, and
 * on acceptance apply them to the link and notify the management
 * layer.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	u8 store_hint;
	int err;

	/* Only the LE central may be asked to update the parameters. */
	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	err = hci_check_conn_params(min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));
	rsp.result = err ? cpu_to_le16(L2CAP_CONN_PARAM_REJECTED)
			 : cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (err)
		return 0;

	/* Parameters are sane: kick off the update on the link and let
	 * the management layer decide whether to store them.
	 */
	store_hint = hci_le_conn_update(hcon, min, max, latency,
					to_multiplier);
	mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
			    store_hint, min, max, latency, to_multiplier);

	return 0;
}
4678
/* Handle an LE Credit Based Connection Response for a request we sent
 * earlier (matched by cmd->ident).  On success the channel becomes
 * ready; on an authentication/encryption failure the security level
 * is raised and a fresh request will be sent after pairing; any other
 * result tears the channel down.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* A successful response must carry sane parameters: MTU/MPS of
	 * at least 23 and a DCID inside the LE dynamic range.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					      dcid < L2CAP_CID_DYN_START ||
					      dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	/* Match the response to the channel that sent the request. */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Refuse a DCID already bound to another channel. */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Raise the required security one step and retry after
		 * pairing completes.
		 */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

	return err;
}
4758
/* Dispatch one BR/EDR signaling command to its handler.  Handlers
 * whose failure must be reported back to the caller (and answered
 * with a Command Reject) propagate their return value; the rest are
 * fire-and-forget.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int ret = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		ret = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		ret = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		ret = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the request payload back verbatim. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		/* Nothing to do for an echo response. */
		break;

	case L2CAP_INFO_REQ:
		ret = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		ret = -EINVAL;
		break;
	}

	return ret;
}
4817
/* Handle an incoming LE Credit Based Connection Request: find a
 * listening channel for the PSM, validate security and the requested
 * parameters, then either create the new channel (possibly deferring
 * the response to userspace) or answer with the appropriate error
 * result.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm = req->psm;
	/* dcid/credits stay 0 in every error response. */
	dcid = 0;
	credits = 0;

	/* 23 is the minimum MTU/MPS accepted for LE CoC. */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* The link must satisfy the listener's security level. */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	/* Have the listener spawn the child channel. */
	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	/* Our side of the response: local CID and initial rx credits. */
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

	/* Deferred: the response is sent later by the defer callback. */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	/* On failure chan is NULL, so advertise zero MTU/MPS. */
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
4954
/* Handle an LE Flow Control Credit packet: add the peer's grant to
 * the channel's tx credit pool and resume transmission.  A grant that
 * would overflow the allowed total is a protocol violation and
 * disconnects the channel.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid = __le16_to_cpu(pkt->cid);
	credits = __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	/* Lookup returns the channel locked and with a reference held. */
	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	if (credits > LE_FLOWCTL_MAX_CREDITS - chan->tx_credits) {
		/* The peer pushed the total past the protocol maximum:
		 * disconnect, but still return 0 so that we don't
		 * trigger an unnecessary command reject packet.
		 */
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		chan->tx_credits += credits;

		/* Resume sending */
		l2cap_le_flowctl_send(chan);

		if (chan->tx_credits)
			chan->ops->resume(chan);
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5001
/* Handle an L2CAP_CREDIT_BASED_CONNECTION_REQ, which can open up to
 * L2CAP_ECRED_MAX_CID enhanced-credit channels at once.  In the
 * response a DCID of 0 marks the corresponding SCID as refused, and
 * the result field carries the (last) failure reason.
 */
static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_req *req = (void *) data;
	DEFINE_RAW_FLEX(struct l2cap_ecred_conn_rsp, pdu, dcid, L2CAP_ECRED_MAX_CID);
	struct l2cap_chan *chan, *pchan;
	u16 mtu, mps;
	__le16 psm;
	u8 result, len = 0;
	int i, num_scid;
	bool defer = false;

	if (!enable_ecred)
		return -EINVAL;

	/* The payload is the fixed header plus a whole number of
	 * 16-bit SCIDs.
	 */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	if (num_scid > L2CAP_ECRED_MAX_CID) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
		goto response;
	}

	psm = req->psm;

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);

	memset(pdu, 0, sizeof(*pdu));

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* The link must satisfy the listener's security level. */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		goto unlock;
	}

	result = L2CAP_CR_LE_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid = __le16_to_cpu(req->scid[i]);

		BT_DBG("scid[%d] 0x%4.4x", i, scid);

		/* Default this slot to "refused"; overwritten with the
		 * real CID if the channel is created below.
		 */
		pdu->dcid[i] = 0x0000;
		len += sizeof(*pdu->dcid);

		/* Check for valid dynamic CID range */
		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
			result = L2CAP_CR_LE_INVALID_SCID;
			continue;
		}

		/* Check if we already have channel with that dcid */
		if (__l2cap_get_chan_by_dcid(conn, scid)) {
			result = L2CAP_CR_LE_SCID_IN_USE;
			continue;
		}

		chan = pchan->ops->new_connection(pchan);
		if (!chan) {
			result = L2CAP_CR_LE_NO_MEM;
			continue;
		}

		bacpy(&chan->src, &conn->hcon->src);
		bacpy(&chan->dst, &conn->hcon->dst);
		chan->src_type = bdaddr_src_type(conn->hcon);
		chan->dst_type = bdaddr_dst_type(conn->hcon);
		chan->psm = psm;
		chan->dcid = scid;
		chan->omtu = mtu;
		chan->remote_mps = mps;

		__l2cap_chan_add(conn, chan);

		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));

		/* Init response */
		if (!pdu->credits) {
			pdu->mtu = cpu_to_le16(chan->imtu);
			pdu->mps = cpu_to_le16(chan->mps);
			pdu->credits = cpu_to_le16(chan->rx_credits);
		}

		pdu->dcid[i] = cpu_to_le16(chan->scid);

		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

		chan->ident = cmd->ident;
		chan->mode = L2CAP_MODE_EXT_FLOWCTL;

		/* Let userspace accept/reject first if it asked to. */
		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
			l2cap_state_change(chan, BT_CONNECT2);
			defer = true;
			chan->ops->defer(chan);
		} else {
			l2cap_chan_ready(chan);
		}
	}

unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

response:
	pdu->result = cpu_to_le16(result);

	/* Deferred: the response is sent later from the defer path. */
	if (defer)
		return 0;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
		       sizeof(*pdu) + len, pdu);

	return 0;
}
5152
/* Handle an L2CAP_CREDIT_BASED_CONNECTION_RSP for a request we sent.
 * The response carries one DCID per requested channel; channels whose
 * DCID is 0 (or whose result signals failure) are torn down, the rest
 * become ready with the advertised MTU/MPS/credits.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	/* cmd_len now tracks how many DCID bytes remain unread. */
	cmd_len -= sizeof(*rsp);

	/* Walk every channel still waiting on this request ident. */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			/* Re-lookup: delete the pre-existing holder too. */
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Raise security one step and retry after pairing. */
			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	return err;
}
5262
/* Handle an L2CAP_CREDIT_BASED_RECONFIGURE_REQ: the peer wants to
 * change the MTU and/or MPS of a set of enhanced-credit channels.
 * Decreasing the MTU of any listed channel is not allowed and is
 * answered with L2CAP_RECONF_INVALID_MTU.
 */
static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					 u8 *data)
{
	struct l2cap_ecred_reconf_req *req = (void *) data;
	struct l2cap_ecred_reconf_rsp rsp;
	u16 mtu, mps, result;
	struct l2cap_chan *chan;
	int i, num_scid;

	if (!enable_ecred)
		return -EINVAL;

	/* The payload must hold the fixed header plus a whole number of
	 * 16-bit SCIDs.  The modulo term needs parentheses (as in
	 * l2cap_ecred_conn_req): without them "%" binds to sizeof(*req)
	 * alone, the expression degenerates to "cmd_len - 0", and every
	 * request - valid or not - was flagged as invalid while真正
	 * misaligned lengths were never detected.
	 */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto respond;
	}

	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);

	BT_DBG("mtu %u mps %u", mtu, mps);

	if (mtu < L2CAP_ECRED_MIN_MTU) {
		result = L2CAP_RECONF_INVALID_MTU;
		goto respond;
	}

	if (mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_RECONF_INVALID_MPS;
		goto respond;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);
	result = L2CAP_RECONF_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid;

		scid = __le16_to_cpu(req->scid[i]);
		if (!scid)
			return -EPROTO;

		/* The peer's SCID is our DCID. */
		chan = __l2cap_get_chan_by_dcid(conn, scid);
		if (!chan)
			continue;

		/* If the MTU value is decreased for any of the included
		 * channels, then the receiver shall disconnect all
		 * included channels.
		 */
		if (chan->omtu > mtu) {
			BT_ERR("chan %p decreased MTU %u -> %u", chan,
			       chan->omtu, mtu);
			result = L2CAP_RECONF_INVALID_MTU;
		}

		chan->omtu = mtu;
		chan->remote_mps = mps;
	}

respond:
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
		       &rsp);

	return 0;
}
5333
/* Handle an L2CAP_CREDIT_BASED_RECONFIGURE_RSP.  On failure, every
 * channel still waiting on this request ident is torn down.
 *
 * NOTE(review): the response is parsed via struct l2cap_ecred_conn_rsp
 * even though a reconfigure response only carries a result field -
 * verify the struct choice and the minimum-length check against the
 * Core spec PDU layout.
 */
static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					 u8 *data)
{
	struct l2cap_chan *chan, *tmp;
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	u16 result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	result = __le16_to_cpu(rsp->result);

	/* Log the CPU-order value; printing the raw __le16 field gave a
	 * byte-swapped result on big-endian hosts (and a sparse type
	 * warning).
	 */
	BT_DBG("result 0x%4.4x", result);

	/* A zero result means success: nothing to clean up. */
	if (!result)
		return 0;

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		if (chan->ident != cmd->ident)
			continue;

		l2cap_chan_del(chan, ECONNRESET);
	}

	return 0;
}
5361
/* Handle an LE Command Reject: the peer did not understand one of our
 * requests, so tear down the channel (if any) still waiting on that
 * request ident.
 */
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	/* Find the channel waiting on the rejected request, if any. */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (chan) {
		/* Take a reference unless the channel is already dying. */
		chan = l2cap_chan_hold_unless_zero(chan);
		if (chan) {
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			l2cap_chan_put(chan);
		}
	}

	return 0;
}
5388
/* Dispatch one LE signaling command to its handler.  A non-zero
 * return makes the caller answer with a Command Reject.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int ret = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		ret = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		/* Nothing to do with the response. */
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		ret = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		ret = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_REQ:
		ret = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_RSP:
		ret = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_REQ:
		ret = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_RSP:
		ret = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		ret = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		ret = -EINVAL;
		break;
	}

	return ret;
}
5451
/* Receive one signaling PDU on the LE signaling channel.  Unlike
 * BR/EDR, an LE signaling PDU carries exactly one command; malformed
 * PDUs are silently dropped, and a failed handler is answered with a
 * Command Reject.  Consumes the skb in all cases.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* The advertised length must match the remaining payload
	 * exactly, and ident 0 is reserved.
	 */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): historical message text; err here is any
		 * handler failure, not only a link-type mismatch.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5492
/* Send a "command not understood" reject for the given request ident. */
static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
{
	struct l2cap_cmd_rej_unk rej = {
		.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD),
	};

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
5500
/* Receive one signaling PDU on the BR/EDR (ACL) signaling channel.  A
 * single PDU may concatenate several commands; each is dispatched in
 * turn, and malformed or failing commands are answered with a Command
 * Reject.  Consumes the skb in all cases.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	/* Give raw sockets a copy of the signaling traffic first. */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		/* Reject a command whose advertised length overruns the
		 * PDU, or that uses the reserved ident 0, then skip past
		 * as much of it as is actually present.
		 */
		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			l2cap_sig_send_rej(conn, cmd->ident);
			skb_pull(skb, len > skb->len ? skb->len : len);
			continue;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			/* NOTE(review): historical message text; err is any
			 * handler failure, not only a link-type mismatch.
			 */
			BT_ERR("Wrong link type (%d)", err);
			l2cap_sig_send_rej(conn, cmd->ident);
		}

		skb_pull(skb, len);
	}

	/* Trailing bytes too short to be a command header. */
	if (skb->len > 0) {
		BT_DBG("corrupted command");
		l2cap_sig_send_rej(conn, 0);
	}

drop:
	kfree_skb(skb);
}
5548
/* Verify (and strip) the per-PDU CRC16 FCS when the channel uses one.
 * Returns 0 on success (or when no FCS is in use), -EBADMSG on mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	/* The CRC also covers the L2CAP header that was already pulled
	 * off this skb; its size depends on whether extended control
	 * fields are in use.
	 */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS off first; the two FCS bytes still sit in
		 * the buffer just past the new skb->len, so they remain
		 * readable after the trim.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5569
/* Answer a poll (P=1) from the peer: deliver the F-bit on an RNR if we
 * are locally busy, otherwise piggyback it on pending I-frames, falling
 * back to an explicit RR if no I-frame carried it.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* If the peer just left its busy state, restart the retransmission
	 * timer for frames still awaiting acknowledgment.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5603
/* Chain new_frag onto skb's frag_list, tracking the list tail through
 * *last_frag so repeated appends stay O(1), and keep skb's length and
 * truesize accounting consistent with the added fragment.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5622
/* Reassemble ERTM/streaming SDU fragments according to the SAR bits of
 * the frame's control field, delivering complete SDUs via
 * chan->ops->recv().
 *
 * On success the skb is consumed (delivered or chained into chan->sdu).
 * On error both the skb and any partially reassembled SDU are freed and
 * the reassembly state is reset.  Returns 0 on success, -EINVAL for SAR
 * sequence violations, -EMSGSIZE when the announced SDU exceeds our MTU,
 * or the error from ->recv().
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* An unsegmented frame while reassembly is in progress is
		 * a protocol violation.
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* The start fragment begins with a 2-byte SDU length */
		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must be strictly smaller than the
		 * whole SDU, otherwise it should have been unsegmented.
		 */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb is now owned by chan->sdu; don't free it below */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation may not complete (or overrun) the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The end fragment must bring the SDU to exactly the
		 * announced length.
		 */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* kfree_skb(NULL) is a no-op, so this is safe whether or
		 * not ownership was transferred above.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5707
/* Placeholder: resegmenting queued PDUs after an MTU change is not
 * implemented, so simply report success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	return 0;
}
5713
l2cap_chan_busy(struct l2cap_chan * chan,int busy)5714 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5715 {
5716 u8 event;
5717
5718 if (chan->mode != L2CAP_MODE_ERTM)
5719 return;
5720
5721 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5722 l2cap_tx(chan, NULL, NULL, event);
5723 }
5724
/* Drain in-sequence frames from the SREJ queue into SDU reassembly until
 * a sequence gap (or local busy) stops us.  Once the queue is empty,
 * return to the normal RECV state and acknowledge.  Returns 0 or the
 * first reassembly error.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap found: the next expected frame hasn't arrived yet */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5758
/* Handle a received SREJ S-frame: selectively retransmit the single
 * I-frame the peer asked for, with P/F-bit bookkeeping, or disconnect
 * when the request is invalid or the retry budget is exhausted.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* The peer cannot legitimately request a frame we never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* SREJ with P=1: retransmit with the F-bit set, and save
		 * the reqseq so the matching F=1 SREJ that follows isn't
		 * retransmitted a second time.
		 */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only if this F=1 SREJ matches
			 * the P=1 SREJ already acted upon above.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5816
/* Handle a received REJ S-frame: retransmit everything from reqseq
 * onward, or disconnect when the request is invalid or the retry budget
 * is exhausted.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* The peer cannot legitimately reject a frame we never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* F=1 answers our poll; retransmit unless a prior REJ
		 * already triggered the retransmission (CONN_REJ_ACT).
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5853
/* Classify a received I-frame's TxSeq relative to the expected sequence
 * number, the tx window, and any outstanding SREJ state.  Returns one of
 * the L2CAP_TXSEQ_* dispositions (expected, duplicate, unexpected, or
 * invalid) that drives the receiver state machines.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq falls before expected_tx_seq within the unacked span:
	 * this frame was already received.
	 */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
5939
/* ERTM receiver state machine, normal RECV state.  Processes incoming
 * I-frames (delivering in-order data, queueing out-of-order frames and
 * entering SREJ_SENT on a gap) and S-frame events.  Any skb not taken
 * over (skb_in_use) is freed before returning.  Returns 0 or a
 * reassembly error.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	struct l2cap_ctrl local_control;
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* While locally busy, expected frames are dropped;
			 * they will show up as missing once busy clears.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu may free skb, hence invalidate
			 * control, so make a copy in advance to use it after
			 * l2cap_reassemble_sdu returns and to avoid the race
			 * condition, for example:
			 *
			 * The current thread calls:
			 *   l2cap_reassemble_sdu
			 *     chan->ops->recv == l2cap_sock_recv_cb
			 *       __sock_queue_rcv_skb
			 * Another thread calls:
			 *   bt_sock_recvmsg
			 *     skb_recv_datagram
			 *     skb_free_datagram
			 * Then the current thread tries to access control, but
			 * it was freed by skb_free_datagram.
			 */
			local_control = *control;
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (local_control.final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					local_control.final = 0;
					l2cap_retransmit_all(chan, &local_control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; only the acks matter */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* F=1 answers our poll; retransmit unless a REJ
			 * already triggered it.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		/* Peer is busy: stop retransmitting until it recovers */
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6091
/* ERTM receiver state machine, SREJ_SENT state: a sequence gap was
 * detected and SREJ requests are outstanding.  Incoming I-frames are
 * buffered in srej_q until the missing frames arrive, at which point
 * l2cap_rx_queued_iframes() drains them in order.  Any skb not taken
 * over (skb_in_use) is freed before returning.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* This is the frame our oldest SREJ asked for */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* F=1 answers our poll; retransmit unless a REJ
			 * already triggered it.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll by re-sending the newest SREJ */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6234
l2cap_finish_move(struct l2cap_chan * chan)6235 static int l2cap_finish_move(struct l2cap_chan *chan)
6236 {
6237 BT_DBG("chan %p", chan);
6238
6239 chan->rx_state = L2CAP_RX_STATE_RECV;
6240 chan->conn->mtu = chan->conn->hcon->mtu;
6241
6242 return l2cap_resegment(chan);
6243 }
6244
/* ERTM receiver state machine, WAIT_P state (used during channel moves):
 * wait for a poll (P=1) from the peer, then rewind the transmit queue to
 * the acknowledged point and answer with the F-bit set.  Anything that
 * is not a poll is rejected with -EPROTO.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	/* Ignore everything until the expected poll arrives */
	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames are not valid while we were waiting for the poll */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	/* Re-process the S-frame under the normal RECV rules */
	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6282
/* ERTM receiver state machine, WAIT_F state (used during channel moves):
 * wait for the peer's final (F=1) response, then rewind the transmit
 * queue, pick up the possibly changed link MTU, and resume normal RECV
 * processing of this frame.  Non-final frames are rejected with -EPROTO.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;
	/* Adopt the (possibly new) HCI link MTU after the move */
	chan->conn->mtu = chan->conn->hcon->mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6316
/* Validate a received ReqSeq: it may only acknowledge frames that have
 * been sent but not yet acked, i.e. it must lie within the unacked
 * window ending at next_tx_seq (modulo the sequence space).
 */
static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	/* Make sure reqseq is for a packet that has been sent but not acked */
	u16 unacked;

	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
}
6325
/* Top-level ERTM receive dispatch: validate the frame's ReqSeq, then
 * route the event to the handler for the current receiver state.  An
 * invalid ReqSeq is a protocol violation and disconnects the channel.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (__valid_reqseq(chan, control->reqseq)) {
		switch (chan->rx_state) {
		case L2CAP_RX_STATE_RECV:
			err = l2cap_rx_state_recv(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_SREJ_SENT:
			err = l2cap_rx_state_srej_sent(chan, control, skb,
						       event);
			break;
		case L2CAP_RX_STATE_WAIT_P:
			err = l2cap_rx_state_wait_p(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_WAIT_F:
			err = l2cap_rx_state_wait_f(chan, control, skb, event);
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return err;
}
6362
/* Streaming-mode receive path: there are no retransmissions, so a frame
 * that is not the expected TxSeq simply aborts any in-progress SDU
 * reassembly and is dropped, while expected frames go straight to
 * reassembly.  Always returns 0 (skb is consumed either way).
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
	 * the txseq field in advance to use it after l2cap_reassemble_sdu
	 * returns and to avoid the race condition, for example:
	 *
	 * The current thread calls:
	 *   l2cap_reassemble_sdu
	 *     chan->ops->recv == l2cap_sock_recv_cb
	 *       __sock_queue_rcv_skb
	 * Another thread calls:
	 *   bt_sock_recvmsg
	 *     skb_recv_datagram
	 *     skb_free_datagram
	 * Then the current thread tries to access control, but it was freed by
	 * skb_free_datagram.
	 */
	u16 txseq = control->txseq;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out of sequence: a frame was lost, so any partial SDU
		 * can never be completed — discard it along with this skb.
		 */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize on the received frame regardless of outcome */
	chan->last_acked_seq = txseq;
	chan->expected_tx_seq = __next_seq(chan, txseq);

	return 0;
}
6414
/* Common receive path for ERTM and streaming channels: unpack the
 * control field, verify the FCS and the MPS limit, then dispatch
 * I-frames to the reassembly/state machines and S-frames to the ERTM
 * event handlers.  Consumes skb; always returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Compute the payload length: the SDU length header and the FCS
	 * don't count against the MPS.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	/* Give the channel owner (e.g. a socket filter) a chance to veto */
	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the 2-bit S-frame function code to an ERTM event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6507
/* Return LE flow-control credits to the peer whenever the credits it
 * still holds fall below our target receive budget as computed by
 * l2cap_le_rx_credits().
 */
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits = l2cap_le_rx_credits(chan);

	/* Peer already holds at least the target number of credits */
	if (chan->rx_credits >= return_credits)
		return;

	/* Only grant the difference up to the target */
	return_credits -= chan->rx_credits;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	chan->rx_credits += return_credits;

	pkt.cid = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
6530
/* Record how much receive buffer space the channel owner currently has
 * and, once connected, update the credits granted to the peer to match.
 * NOTE(review): rx_avail of -1 appears to mean "no limit" (see the check
 * in l2cap_ecred_recv()) — confirm against callers.
 */
void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail)
{
	/* Avoid redundant credit updates when nothing changed */
	if (chan->rx_avail == rx_avail)
		return;

	BT_DBG("chan %p has %zd bytes avail for rx", chan, rx_avail);

	chan->rx_avail = rx_avail;

	if (chan->state == BT_CONNECTED)
		l2cap_chan_le_send_credits(chan);
}
6543
/* Deliver a fully reassembled LE/ECRED SDU to the channel owner, then
 * replenish the peer's credits.  A delivery failure disconnects the
 * channel unless rx_avail is -1 (owner imposes no receive limit).
 * Returns the ->recv() result.
 */
static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);

	/* Wait recv to confirm reception before updating the credits */
	err = chan->ops->recv(chan, skb);

	if (err < 0 && chan->rx_avail != -1) {
		BT_ERR("Queueing received LE L2CAP data failed");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return err;
	}

	/* Update credits whenever an SDU is received */
	l2cap_chan_le_send_credits(chan);

	return err;
}
6564
/* Receive path for LE and enhanced credit based flow control channels:
 * performs per-PDU credit accounting and SDU reassembly.
 *
 * On a negative return the caller still owns @skb and drops it (see
 * l2cap_data_channel()); once reassembly has started, ownership is
 * handled internally and 0 is returned.
 */
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	/* Data received without any credits left is a protocol violation:
	 * tear the channel down.
	 */
	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	/* A single PDU may never exceed the incoming MTU */
	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	/* Each PDU consumes exactly one credit */
	chan->rx_credits--;
	BT_DBG("chan %p: rx_credits %u -> %u",
	       chan, chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happen
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU carries the total SDU length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Unfragmented SDU: deliver it directly */
		if (skb->len == sdu_len)
			return l2cap_ecred_recv(chan, skb);

		/* Start reassembly; skb ownership moves to chan->sdu */
		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* Continuation fragment: append to the pending SDU. The frag list
	 * now owns skb, so NULL it to avoid freeing it on the error path.
	 */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		/* SDU complete: hand the reassembled buffer to the channel */
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6669
/* Dispatch an incoming data PDU to the channel identified by @cid.
 * Consumes @skb on every path. l2cap_get_chan_by_scid() returns the
 * channel locked and referenced; both are released before returning.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* A negative return means the skb was not consumed */
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* recv returning non-zero means it did not take the skb */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* ERTM/streaming receive path owns the skb from here on */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
6735
/* Deliver a connectionless (G-frame) PDU to a matching RAW/PSM channel.
 * Only valid on BR/EDR ACL links; consumes @skb on every path.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	/* Look up a global channel bound to this PSM (CID 0 -> any) */
	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	l2cap_chan_lock(chan);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* recv returning zero means it took ownership of the skb */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6776
/* Process one complete L2CAP frame (basic header already present in
 * @skb). Frames arriving before the HCI link is fully connected are
 * queued and replayed later by process_pending_rx(). Consumes @skb.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	/* Strip the basic header; lh still points at the header bytes */
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload length exactly */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6831
/* Work item that drains frames queued by l2cap_recv_frame() while the
 * HCI link was still coming up, replaying them under conn->lock.
 */
static void process_pending_rx(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       pending_rx_work);
	struct sk_buff *skb;

	BT_DBG("");

	mutex_lock(&conn->lock);

	while ((skb = skb_dequeue(&conn->pending_rx)))
		l2cap_recv_frame(conn, skb);

	mutex_unlock(&conn->lock);
}
6847
/* Get or create the l2cap_conn attached to @hcon. Returns the existing
 * connection if one is already set up, otherwise allocates a new one
 * together with its HCI channel. Returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	/* conn holds a reference on the underlying hci_conn */
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	conn->mtu = hcon->mtu;
	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* Advertise SMP over BR/EDR only when the controller supports it */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_DELAYED_WORK(&conn->id_addr_timer, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
6899
is_valid_psm(u16 psm,u8 dst_type)6900 static bool is_valid_psm(u16 psm, u8 dst_type)
6901 {
6902 if (!psm)
6903 return false;
6904
6905 if (bdaddr_type_is_le(dst_type))
6906 return (psm <= 0x00ff);
6907
6908 /* PSM must be odd and lsb of upper byte must be 0 */
6909 return ((psm & 0x0101) == 0x0001);
6910 }
6911
/* Iterator context for l2cap_chan_by_pid() (see l2cap_chan_connect()) */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* channel initiating the connect */
	struct pid *pid;		/* owner PID to match against */
	int count;			/* matching channels counted so far */
};
6917
/* l2cap_chan_list() callback: count pending deferred EXT_FLOWCTL
 * connection attempts from the same PID to the same PSM, excluding the
 * initiating channel itself. Used to enforce L2CAP_ECRED_CONN_SCID_MAX.
 */
static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
{
	struct l2cap_chan_data *d = data;
	struct pid *pid;

	if (chan == d->chan)
		return;

	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	pid = chan->ops->get_peer_pid(chan);

	/* Only count deferred channels with the same PID/PSM */
	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
		return;

	d->count++;
}
6938
/* Initiate an outgoing L2CAP channel connection.
 * @chan:     channel to connect (interface state must allow connecting)
 * @psm:      destination PSM for connection oriented channels
 * @cid:      destination CID for fixed channels
 * @dst:      remote bdaddr
 * @dst_type: remote address type (BDADDR_*)
 * @timeout:  connect timeout passed down to the HCI layer
 *
 * Validates the psm/cid/mode/state combination, creates (or reuses) the
 * underlying ACL/LE link and its l2cap_conn, adds the channel to the
 * connection and starts the L2CAP connection procedure.
 *
 * Returns 0 on success (including "already connecting"), negative errno
 * otherwise.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type, u16 timeout)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* RAW channels may connect without a valid PSM or CID */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Reject modes that are compiled out or disabled by module param */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising we connect directly; otherwise go
		 * through the passive-scan based connect procedure.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type, false,
					      chan->sec_level, timeout,
					      HCI_ROLE_SLAVE, 0, 0);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level, timeout,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN, timeout);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check if there isn't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->lock);
	l2cap_chan_lock(chan);

	/* Destination CID must not already be in use on this connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* If the link is already up, kick off the L2CAP level setup now */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7123
l2cap_ecred_reconfigure(struct l2cap_chan * chan)7124 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
7125 {
7126 struct l2cap_conn *conn = chan->conn;
7127 DEFINE_RAW_FLEX(struct l2cap_ecred_reconf_req, pdu, scid, 1);
7128
7129 pdu->mtu = cpu_to_le16(chan->imtu);
7130 pdu->mps = cpu_to_le16(chan->mps);
7131 pdu->scid[0] = cpu_to_le16(chan->scid);
7132
7133 chan->ident = l2cap_get_ident(conn);
7134
7135 l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
7136 sizeof(pdu), &pdu);
7137 }
7138
/* Reconfigure an ECRED channel to a larger incoming MTU.
 * @mtu must be >= the current imtu (shrinking is not allowed).
 * Returns 0 on success, -EINVAL if @mtu would shrink the channel.
 */
int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
{
	if (chan->imtu > mtu)
		return -EINVAL;

	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);

	chan->imtu = mtu;

	l2cap_ecred_reconfigure(chan);

	return 0;
}
7152
7153 /* ---- L2CAP interface with lower layer (HCI) ---- */
7154
/* HCI incoming-connection hook: decide whether to accept an incoming
 * ACL connection based on listening channels. Channels bound to the
 * local adapter address take precedence over BDADDR_ANY listeners.
 * Returns the accumulated HCI_LM_* link-mode flags (0 rejects).
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&c->src, &hdev->bdaddr)) {
			/* Exact match on the local adapter address */
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&c->src, BDADDR_ANY)) {
			/* Wildcard listener */
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}
7183
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 *
 * A returned channel carries a reference taken with
 * l2cap_chan_hold_unless_zero(); the caller must drop it with
 * l2cap_chan_put().
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	/* Resume after c, or start from the head of the global list */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Source must match the link's source address or be ANY */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7219
/* HCI connect-complete callback: set up the l2cap_conn for a newly
 * established ACL/LE link, spawn fixed channels listening for it and
 * kick the connection-ready machinery. On failure status the whole
 * connection is torn down instead.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Advance before dropping the ref that keeps pchan alive */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7280
l2cap_disconn_ind(struct hci_conn * hcon)7281 int l2cap_disconn_ind(struct hci_conn *hcon)
7282 {
7283 struct l2cap_conn *conn = hcon->l2cap_data;
7284
7285 BT_DBG("hcon %p", hcon);
7286
7287 if (!conn)
7288 return HCI_ERROR_REMOTE_USER_TERM;
7289 return conn->disc_reason;
7290 }
7291
/* HCI disconnect-complete callback: tear down the L2CAP connection on
 * ACL/LE links; other link types are ignored.
 */
static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	switch (hcon->type) {
	case ACL_LINK:
	case LE_LINK:
		BT_DBG("hcon %p reason %d", hcon, reason);
		l2cap_conn_del(hcon, bt_to_errno(reason));
		break;
	default:
		break;
	}
}
7301
/* React to an encryption change on a connection oriented channel:
 * losing encryption arms a timer (medium security) or closes the
 * channel outright (high/FIPS); regaining it cancels the timer.
 */
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return;

	if (encrypt) {
		/* Encryption (re)established: cancel any pending timer */
		if (chan->sec_level == BT_SECURITY_MEDIUM)
			__clear_chan_timer(chan);
		return;
	}

	/* Encryption was dropped */
	switch (chan->sec_level) {
	case BT_SECURITY_MEDIUM:
		__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
		break;
	case BT_SECURITY_HIGH:
	case BT_SECURITY_FIPS:
		l2cap_chan_close(chan, ECONNREFUSED);
		break;
	default:
		break;
	}
}
7318
/* HCI security-change callback: walk every channel on the connection
 * and advance or abort its state machine according to the new
 * authentication/encryption status.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		/* Skip channels with another pending security procedure */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Established channels only need their timers re-checked */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing connect was waiting on security */
			if (!status && l2cap_check_enc_key_size(hcon))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			/* Incoming BR/EDR connect held in CONNECT2: now
			 * answer the pending connection request.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: refuse and disconnect */
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On success, follow up with our configuration req */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
7405
/* Append fragment into frame respecting the maximum len of rx_skb.
 * Allocates conn->rx_skb (sized @len) on the first fragment. Returns
 * the number of bytes actually copied from @skb (which are pulled off
 * it), or -ENOMEM if the buffer could not be allocated.
 */
static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
			   u16 len)
{
	if (!conn->rx_skb) {
		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			return -ENOMEM;
		/* Init rx_len */
		conn->rx_len = len;
	}

	/* Copy as much as the rx_skb can hold */
	len = min_t(u16, len, skb->len);
	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
	skb_pull(skb, len);
	conn->rx_len -= len;

	return len;
}
7427
/* Complete the 2-byte L2CAP length field from continuation data and,
 * once known, make sure conn->rx_skb is big enough for the full frame
 * (reallocating it when the initial conn->mtu-sized guess was short).
 * Returns bytes consumed or a negative error.
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to received all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in order to
	 * fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length, copying the
	 * already received bytes over from the old (too small) buffer.
	 */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}
7462
l2cap_recv_reset(struct l2cap_conn * conn)7463 static void l2cap_recv_reset(struct l2cap_conn *conn)
7464 {
7465 kfree_skb(conn->rx_skb);
7466 conn->rx_skb = NULL;
7467 conn->rx_len = 0;
7468 }
7469
l2cap_conn_hold_unless_zero(struct l2cap_conn * c)7470 struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *c)
7471 {
7472 if (!c)
7473 return NULL;
7474
7475 BT_DBG("conn %p orig refcnt %u", c, kref_read(&c->ref));
7476
7477 if (!kref_get_unless_zero(&c->ref))
7478 return NULL;
7479
7480 return c;
7481 }
7482
/* Entry point for ACL data from the HCI layer: reassemble ACL fragments
 * into complete L2CAP frames (tracked in conn->rx_skb/rx_len) and feed
 * them to l2cap_recv_frame(). Consumes @skb on every path.
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn;
	int len;

	/* Lock hdev to access l2cap_data to avoid race with l2cap_conn_del */
	hci_dev_lock(hcon->hdev);

	conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	/* Hold conn so it can't be freed while we process this skb */
	conn = l2cap_conn_hold_unless_zero(conn);

	hci_dev_unlock(hcon->hdev);

	if (!conn) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);

	mutex_lock(&conn->lock);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start frame while one is pending means we lost
		 * fragments of the previous frame.
		 */
		if (conn->rx_skb) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment may not contain the L2CAP length so just
		 * copy the initial byte when that happens and use conn->mtu as
		 * expected length.
		 */
		if (skb->len < L2CAP_LEN_SIZE) {
			l2cap_recv_frag(conn, skb, conn->mtu);
			break;
		}

		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			goto unlock;
		}

		BT_DBG("Start: total len %d, frag len %u", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %u, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		if (l2cap_recv_frag(conn, skb, len) < 0)
			goto drop;

		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);

		if (!conn->rx_skb) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Complete the L2CAP length if it has not been read */
		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
			if (l2cap_recv_len(conn, skb) < 0) {
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}

			/* Header still could not be read just continue */
			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
				break;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %u, expected %u)",
			       skb->len, conn->rx_len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		l2cap_recv_frag(conn, skb, skb->len);

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
unlock:
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
}
7601
/* Callbacks registered with the HCI core (see l2cap_init()) */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
7608
/* debugfs "l2cap" file: dump one line per global channel */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct l2cap_chan *c;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
			   &c->src, c->src_type, &c->dst, c->dst_type,
			   c->state, __le16_to_cpu(c->psm),
			   c->scid, c->dcid, c->imtu, c->omtu,
			   c->sec_level, c->mode);
	}

	read_unlock(&chan_list_lock);

	return 0;
}
7627
/* Generates l2cap_debugfs_fops wrapping l2cap_debugfs_show() */
DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

/* Dentry of the debugfs file created in l2cap_init() */
static struct dentry *l2cap_debugfs;
7631
/* Module init: register the L2CAP socket layer and HCI callbacks, and
 * create the debugfs entry when bt_debugfs is available.
 * Returns 0 on success or the error from l2cap_init_sockets().
 */
int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	hci_register_cb(&l2cap_cb);

	/* debugfs is best-effort: its absence is not an error */
	if (IS_ERR_OR_NULL(bt_debugfs))
		return 0;

	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
					    NULL, &l2cap_debugfs_fops);

	return 0;
}
7650
/* Module exit: undo l2cap_init() in reverse order */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
7657
/* Runtime-tunable module parameters (writable via sysfs) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
7663