1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40
41 #include "smp.h"
42
43 #define LE_FLOWCTL_MAX_CREDITS 65535
44
45 bool disable_ertm;
46 bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED);
47
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49
50 static LIST_HEAD(chan_list);
51 static DEFINE_RWLOCK(chan_list_lock);
52
53 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
54 u8 code, u8 ident, u16 dlen, void *data);
55 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 void *data);
57 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
58 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59
60 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
61 struct sk_buff_head *skbs, u8 event);
62 static void l2cap_retrans_timeout(struct work_struct *work);
63 static void l2cap_monitor_timeout(struct work_struct *work);
64 static void l2cap_ack_timeout(struct work_struct *work);
65
/* Map an HCI link type plus low-level address type to the exported
 * BDADDR_* address-type constant.
 */
static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
{
	if (link_type != LE_LINK)
		return BDADDR_BREDR;

	return bdaddr_type == ADDR_LE_DEV_PUBLIC ? BDADDR_LE_PUBLIC :
						   BDADDR_LE_RANDOM;
}
77
bdaddr_src_type(struct hci_conn * hcon)78 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
79 {
80 return bdaddr_type(hcon->type, hcon->src_type);
81 }
82
bdaddr_dst_type(struct hci_conn * hcon)83 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
84 {
85 return bdaddr_type(hcon->type, hcon->dst_type);
86 }
87
88 /* ---- L2CAP channels ---- */
89
/* Look up a channel on @conn by destination CID.
 * No locking, no reference taken - callers hold the required locks.
 */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list)
		if (chan->dcid == cid)
			return chan;

	return NULL;
}
101
/* Look up a channel on @conn by source CID.
 * No locking, no reference taken - callers hold the required locks.
 */
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list)
		if (chan->scid == cid)
			return chan;

	return NULL;
}
113
/* Find channel with given SCID.
 * Returns a referenced, locked channel; the caller must unlock it and
 * drop the reference with l2cap_chan_put() when done.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0, i.e. the channel
		 * is not already on its way to destruction.
		 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}

	return c;
}
132
/* Find channel with given DCID.
 * Returns a referenced, locked channel; the caller must unlock it and
 * drop the reference with l2cap_chan_put() when done.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0, i.e. the channel
		 * is not already on its way to destruction.
		 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}

	return c;
}
151
/* Find the channel on @conn waiting for a response to the signalling
 * command identified by @ident. No locking, no reference taken.
 */
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list)
		if (chan->ident == ident)
			return chan;

	return NULL;
}
163
/* Find a channel bound to (@psm, @src) in the global channel list,
 * restricted to the same transport (BR/EDR vs LE) as @src_type.
 * chan_list_lock must be held by the caller.
 */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
						      u8 src_type)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		bool c_bredr = c->src_type == BDADDR_BREDR;

		/* Never match across transports */
		if ((src_type == BDADDR_BREDR) != c_bredr)
			continue;

		if (c->sport == psm && !bacmp(&c->src, src))
			return c;
	}

	return NULL;
}
181
/* Bind @chan to @psm on source address @src, or allocate a free dynamic
 * PSM when @psm is 0.
 *
 * Returns 0 on success, -EADDRINUSE if the requested PSM/address pair
 * is already bound, or -EINVAL if the dynamic range is exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			/* Valid BR/EDR PSMs are odd, so step by 2 through
			 * the dynamic range.
			 */
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		/* Assume exhaustion until a free PSM is found */
		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
226
/* Install the fixed CID @scid on @chan and switch the channel into
 * fixed-channel mode. Always succeeds.
 */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;
	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
241
l2cap_alloc_cid(struct l2cap_conn * conn)242 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
243 {
244 u16 cid, dyn_end;
245
246 if (conn->hcon->type == LE_LINK)
247 dyn_end = L2CAP_CID_LE_DYN_END;
248 else
249 dyn_end = L2CAP_CID_DYN_END;
250
251 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
252 if (!__l2cap_get_chan_by_scid(conn, cid))
253 return cid;
254 }
255
256 return 0;
257 }
258
/* Transition @chan to @state and run the owner's state_change callback
 * with err == 0.
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
267
/* Like l2cap_state_change(), but forwards @err to the state_change
 * callback so the owner can report the failure.
 */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
274
/* Report @err on @chan without changing its state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
279
__set_retrans_timer(struct l2cap_chan * chan)280 static void __set_retrans_timer(struct l2cap_chan *chan)
281 {
282 if (!delayed_work_pending(&chan->monitor_timer) &&
283 chan->retrans_timeout) {
284 l2cap_set_timer(chan, &chan->retrans_timer,
285 secs_to_jiffies(chan->retrans_timeout));
286 }
287 }
288
__set_monitor_timer(struct l2cap_chan * chan)289 static void __set_monitor_timer(struct l2cap_chan *chan)
290 {
291 __clear_retrans_timer(chan);
292 if (chan->monitor_timeout) {
293 l2cap_set_timer(chan, &chan->monitor_timer,
294 secs_to_jiffies(chan->monitor_timeout));
295 }
296 }
297
/* Return the skb in @head whose ERTM tx sequence number is @seq, or
 * NULL if no queued frame carries that sequence number.
 */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->l2cap.txseq == seq)
			return skb;
	}

	return NULL;
}
310
311 /* ---- L2CAP sequence number lists ---- */
312
313 /* For ERTM, ordered lists of sequence numbers must be tracked for
314 * SREJ requests that are received and for frames that are to be
315 * retransmitted. These seq_list functions implement a singly-linked
316 * list in an array, where membership in the list can also be checked
317 * in constant time. Items can also be added to the tail of the list
318 * and removed from the head in constant time, without further memory
319 * allocs or frees.
320 */
321
/* Allocate the backing array for a sequence list capable of holding
 * @size entries and mark the list empty. Returns 0 or -ENOMEM.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t n, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	n = roundup_pow_of_two(size);

	seq_list->list = kmalloc_array(n, sizeof(u16), GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	seq_list->mask = n - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;

	for (i = 0; i < n; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
344
/* Release the slot array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
349
l2cap_seq_list_contains(struct l2cap_seq_list * seq_list,u16 seq)350 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
351 u16 seq)
352 {
353 /* Constant-time check for list membership */
354 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
355 }
356
/* Unlink and return the sequence number at the head of the list.
 * The caller must ensure the list is non-empty.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next linked entry and clear the old slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	/* The popped entry was also the tail: the list is now empty */
	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
372
l2cap_seq_list_clear(struct l2cap_seq_list * seq_list)373 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
374 {
375 u16 i;
376
377 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
378 return;
379
380 for (i = 0; i <= seq_list->mask; i++)
381 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
382
383 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
384 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
385 }
386
/* Append @seq to the tail of the list. Duplicate appends are ignored
 * because each sequence number owns exactly one slot.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already a member - nothing to do */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		/* List was empty: @seq becomes the head as well */
		seq_list->head = seq;
	else
		/* Link the previous tail's slot to @seq */
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
404
/* Deferred work run when a channel timer (set via __set_chan_timer())
 * expires: close the channel with an error that reflects the state it
 * was stuck in.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	if (!conn)
		return;

	/* Lock ordering: connection lock before channel lock */
	mutex_lock(&conn->lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	/* Drop the reference taken when this work was scheduled */
	l2cap_chan_put(chan);

	mutex_unlock(&conn->lock);
}
440
/* Allocate and initialise a new channel with a single reference, and
 * link it into the global channel list. Returns NULL on allocation
 * failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	/* Available receive buffer space is initially unknown */
	chan->rx_avail = -1;

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	chan->state = BT_OPEN;

	/* Initial reference; dropped by the owner via l2cap_chan_put() */
	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
480
/* kref release callback: unlink the channel from the global list and
 * free it. Runs when the last l2cap_chan_put() drops the refcount to 0.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
493
/* Take an additional reference on @c. The caller must already hold a
 * valid reference.
 */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
500
l2cap_chan_hold_unless_zero(struct l2cap_chan * c)501 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
502 {
503 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
504
505 if (!kref_get_unless_zero(&c->kref))
506 return NULL;
507
508 return c;
509 }
510
/* Drop a reference on @c; frees the channel via l2cap_chan_destroy()
 * when the last reference goes away.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
518
/* Reset @chan's negotiable parameters (ERTM windows, timeouts, security
 * level, FCS) to the spec defaults and clear any configuration state.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

	/* Restart configuration from scratch */
	chan->conf_state = 0;
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
539
l2cap_le_rx_credits(struct l2cap_chan * chan)540 static __u16 l2cap_le_rx_credits(struct l2cap_chan *chan)
541 {
542 size_t sdu_len = chan->sdu ? chan->sdu->len : 0;
543
544 if (chan->mps == 0)
545 return 0;
546
547 /* If we don't know the available space in the receiver buffer, give
548 * enough credits for a full packet.
549 */
550 if (chan->rx_avail == -1)
551 return (chan->imtu / chan->mps) + 1;
552
553 /* If we know how much space is available in the receive buffer, give
554 * out as many credits as would fill the buffer.
555 */
556 if (chan->rx_avail <= sdu_len)
557 return 0;
558
559 return DIV_ROUND_UP(chan->rx_avail - sdu_len, chan->mps);
560 }
561
/* Initialise LE credit-based flow-control state on @chan: reset SDU
 * reassembly, seed TX credits from the peer and compute initial RX
 * credits.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	chan->rx_credits = l2cap_le_rx_credits(chan);

	skb_queue_head_init(&chan->tx_q);
}
574
/* Initialise enhanced credit-based flow-control state: same as LE
 * flow control, but enforce the ECRED minimum MPS.
 */
static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
{
	l2cap_le_flowctl_init(chan, tx_credits);

	/* L2CAP implementations shall support a minimum MPS of 64 octets */
	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
		chan->mps = L2CAP_ECRED_MIN_MPS;
		/* MPS changed, so the credit calculation must be redone */
		chan->rx_credits = l2cap_le_rx_credits(chan);
	}
}
585
/* Attach @chan to @conn: assign CIDs/MTU according to the channel type,
 * set up default QoS parameters and link the channel into the
 * connection's channel list. The caller holds the connection lock.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference held by the connection; dropped in l2cap_chan_del() */
	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	/* Append to the list since the order matters for ECRED */
	list_add_tail(&chan->list, &conn->chan_l);
}
638
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->lock);
}
645
/* Detach @chan from its connection and tear down mode-specific state,
 * reporting @err to the owner. The caller holds the channel lock (and
 * the connection lock when chan->conn is set).
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference taken in __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);
	}

	/* Mode-specific state below only exists once configuration has
	 * completed.
	 */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
703
/* Invoke @func on every channel of @conn whose ident matches @id.
 * Iteration is safe against @func unlinking the current channel.
 */
static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
				 l2cap_chan_func_t func, void *data)
{
	struct l2cap_chan *cur, *tmp;

	list_for_each_entry_safe(cur, tmp, &conn->chan_l, list)
		if (cur->ident == id)
			func(cur, data);
}
714
/* Invoke @func on every channel of @conn. The caller holds the
 * connection lock; @func must not remove channels from the list.
 */
static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
			      void *data)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		func(chan, data);
	}
}
724
/* Locked wrapper around __l2cap_chan_list(); a NULL @conn is a no-op. */
void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
		     void *data)
{
	if (!conn)
		return;

	mutex_lock(&conn->lock);
	__l2cap_chan_list(conn, func, data);
	mutex_unlock(&conn->lock);
}

EXPORT_SYMBOL_GPL(l2cap_chan_list);
737
/* Deferred work: copy the HCI connection's current destination address
 * and type to every channel on the connection (used when the peer's
 * address has been updated, e.g. after identity resolution).
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_timer.work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
756
l2cap_chan_le_connect_reject(struct l2cap_chan * chan)757 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
758 {
759 struct l2cap_conn *conn = chan->conn;
760 struct l2cap_le_conn_rsp rsp;
761 u16 result;
762
763 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
764 result = L2CAP_CR_LE_AUTHORIZATION;
765 else
766 result = L2CAP_CR_LE_BAD_PSM;
767
768 l2cap_state_change(chan, BT_DISCONN);
769
770 rsp.dcid = cpu_to_le16(chan->scid);
771 rsp.mtu = cpu_to_le16(chan->imtu);
772 rsp.mps = cpu_to_le16(chan->mps);
773 rsp.credits = cpu_to_le16(chan->rx_credits);
774 rsp.result = cpu_to_le16(result);
775
776 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
777 &rsp);
778 }
779
/* Reject a pending enhanced credit-based connection: move the channel
 * to BT_DISCONN and emit the deferred ECRED connection response.
 */
static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
{
	l2cap_state_change(chan, BT_DISCONN);

	__l2cap_ecred_conn_rsp_defer(chan);
}
786
l2cap_chan_connect_reject(struct l2cap_chan * chan)787 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
788 {
789 struct l2cap_conn *conn = chan->conn;
790 struct l2cap_conn_rsp rsp;
791 u16 result;
792
793 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
794 result = L2CAP_CR_SEC_BLOCK;
795 else
796 result = L2CAP_CR_BAD_PSM;
797
798 l2cap_state_change(chan, BT_DISCONN);
799
800 rsp.scid = cpu_to_le16(chan->dcid);
801 rsp.dcid = cpu_to_le16(chan->scid);
802 rsp.result = cpu_to_le16(result);
803 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
804
805 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
806 }
807
/* Close @chan according to its current state: send a disconnect or
 * reject on the wire where the protocol requires one, otherwise tear
 * the channel down locally with @reason. Caller holds the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Established channel: request disconnection and
			 * wait (bounded by the channel timer) for the
			 * response.
			 */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming connection still pending local acceptance:
		 * reject it on the wire before deleting the channel.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					/* ECRED responses are deferred;
					 * the channel is deleted later.
					 */
					return;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
858
/* Map the channel type and requested security level to an HCI
 * authentication requirement. May raise chan->sec_level from LOW to
 * SDP for the SDP PSM (and the 3DSP connectionless PSM).
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		fallthrough;

	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
910
/* Service level security */
/* Check (and if necessary elevate) link security for @chan: SMP for LE
 * links, HCI authentication/encryption for BR/EDR.
 */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
925
l2cap_get_ident(struct l2cap_conn * conn)926 static u8 l2cap_get_ident(struct l2cap_conn *conn)
927 {
928 u8 id;
929
930 /* Get next available identificator.
931 * 1 - 128 are used by kernel.
932 * 129 - 199 are reserved.
933 * 200 - 254 are used by utilities like l2ping, etc.
934 */
935
936 mutex_lock(&conn->ident_lock);
937
938 if (++conn->tx_ident > 128)
939 conn->tx_ident = 1;
940
941 id = conn->tx_ident;
942
943 mutex_unlock(&conn->ident_lock);
944
945 return id;
946 }
947
/* Send @skb on the connection's HCI channel, or drop it when the
 * underlying HCI connection has already gone away.
 */
static void l2cap_send_acl(struct l2cap_conn *conn, struct sk_buff *skb,
			   u8 flags)
{
	/* Check if the hcon still valid before attempting to send */
	if (hci_conn_valid(conn->hcon->hdev, conn->hcon))
		hci_send_acl(conn->hchan, skb, flags);
	else
		kfree_skb(skb);
}
957
/* Build and transmit an L2CAP signalling command (@code/@ident) with
 * @len bytes of payload from @data. Failures to build the command are
 * silently dropped.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	/* Signalling traffic is high priority and keeps the link active */
	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	l2cap_send_acl(conn, skb, flags);
}
982
/* Transmit a data frame for @chan on the ACL link, choosing the
 * appropriate flush semantics for the link type and channel flags.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
1005
/* Decode a 16-bit enhanced ERTM control field into @control. */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	bool sframe = enh & L2CAP_CTRL_FRAME_TYPE;

	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
	control->sframe = sframe;

	if (sframe) {
		/* S-Frame: supervisory fields only, no payload sequencing */
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame: carries SAR and TxSeq, no supervisory fields */
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
		control->poll = 0;
		control->super = 0;
	}
}
1029
/* Decode a 32-bit extended ERTM control field into @control. */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	bool sframe = ext & L2CAP_EXT_CTRL_FRAME_TYPE;

	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
	control->sframe = sframe;

	if (sframe) {
		/* S-Frame: supervisory fields only, no payload sequencing */
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame: carries SAR and TxSeq, no supervisory fields */
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
		control->poll = 0;
		control->super = 0;
	}
}
1053
/* Decode and strip the ERTM control field at the front of @skb into the
 * skb's control block, honouring the channel's extended-control flag.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
1067
__pack_extended_control(struct l2cap_ctrl * control)1068 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1069 {
1070 u32 packed;
1071
1072 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1073 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1074
1075 if (control->sframe) {
1076 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1077 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1078 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1079 } else {
1080 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1081 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1082 }
1083
1084 return packed;
1085 }
1086
__pack_enhanced_control(struct l2cap_ctrl * control)1087 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1088 {
1089 u16 packed;
1090
1091 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1092 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1093
1094 if (control->sframe) {
1095 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1096 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1097 packed |= L2CAP_CTRL_FRAME_TYPE;
1098 } else {
1099 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1100 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1101 }
1102
1103 return packed;
1104 }
1105
__pack_control(struct l2cap_chan * chan,struct l2cap_ctrl * control,struct sk_buff * skb)1106 static inline void __pack_control(struct l2cap_chan *chan,
1107 struct l2cap_ctrl *control,
1108 struct sk_buff *skb)
1109 {
1110 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1111 put_unaligned_le32(__pack_extended_control(control),
1112 skb->data + L2CAP_HDR_SIZE);
1113 } else {
1114 put_unaligned_le16(__pack_enhanced_control(control),
1115 skb->data + L2CAP_HDR_SIZE);
1116 }
1117 }
1118
__ertm_hdr_size(struct l2cap_chan * chan)1119 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1120 {
1121 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1122 return L2CAP_EXT_HDR_SIZE;
1123 else
1124 return L2CAP_ENH_HDR_SIZE;
1125 }
1126
/* Allocate and build an S-frame PDU carrying the already-packed
 * @control field: basic L2CAP header, enhanced or extended control
 * field and, when CRC16 FCS is enabled, a trailing FCS over the whole
 * frame. Returns ERR_PTR(-ENOMEM) if the skb cannot be allocated.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	/* The length field excludes the basic header itself */
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS is computed over everything written so far */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1159
/* Build and transmit a single S-frame described by @control.
 *
 * A pending F-bit is piggy-backed onto the frame (unless it is a
 * poll), the RNR-sent state is tracked, and for anything other than an
 * SREJ the acknowledged sequence number is recorded so the delayed-ack
 * timer can be cancelled.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* Consume a pending F-bit unless this frame carries a poll */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		/* RR/RNR acknowledge up to reqseq, so no separate ack
		 * needs to be scheduled any more.
		 */
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1197
/* Send an RR (or RNR when the local side is busy) S-frame
 * acknowledging everything up to the current buffer_seq; @poll sets
 * the P-bit.
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;
	control.super = test_bit(CONN_LOCAL_BUSY, &chan->conn_state) ?
			L2CAP_SUPER_RNR : L2CAP_SUPER_RR;
	control.reqseq = chan->buffer_seq;

	l2cap_send_sframe(chan, &control);
}
1216
__l2cap_no_conn_pending(struct l2cap_chan * chan)1217 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1218 {
1219 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1220 return true;
1221
1222 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1223 }
1224
/* Send an L2CAP Connection Request for @chan and mark the connect as
 * pending (CONF_CONNECT_PEND) until the response arrives; the
 * allocated ident is stored so the response can be matched.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1239
/* Transition @chan to BT_CONNECTED, clear configuration state and
 * notify the channel's owner via ops->ready().
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* No TX credits yet: suspend sending until the peer
		 * grants some.
		 */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);
		break;
	}

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1265
/* Send an LE Credit Based Connection Request for @chan after
 * initialising local flow-control state. Only one request is ever
 * sent per channel (guarded by FLAG_LE_CONN_REQ_SENT).
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Default the incoming MTU to the link MTU if unset */
	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	l2cap_le_flowctl_init(chan, 0);

	memset(&req, 0, sizeof(req));
	req.psm = chan->psm;
	req.scid = cpu_to_le16(chan->scid);
	req.mtu = cpu_to_le16(chan->imtu);
	req.mps = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1291
/* Scratch data used to build one Enhanced Credit Based Connection
 * Request covering the triggering channel plus compatible deferred
 * channels.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req_hdr req;
		__le16 scid[5];	/* source CIDs carried in the request */
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel that triggered the request */
	struct pid *pid;		/* peer PID used to match deferred chans */
	int count;			/* number of scid[] slots filled */
};
1301
l2cap_ecred_defer_connect(struct l2cap_chan * chan,void * data)1302 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1303 {
1304 struct l2cap_ecred_conn_data *conn = data;
1305 struct pid *pid;
1306
1307 if (chan == conn->chan)
1308 return;
1309
1310 if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1311 return;
1312
1313 pid = chan->ops->get_peer_pid(chan);
1314
1315 /* Only add deferred channels with the same PID/PSM */
1316 if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1317 chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1318 return;
1319
1320 if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1321 return;
1322
1323 l2cap_ecred_init(chan, 0);
1324
1325 /* Set the same ident so we can match on the rsp */
1326 chan->ident = conn->chan->ident;
1327
1328 /* Include all channels deferred */
1329 conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
1330
1331 conn->count++;
1332 }
1333
/* Send an Enhanced Credit Based Connection Request for @chan, folding
 * in compatible deferred channels so a single request can open several
 * channels at once. Only one request is sent per channel (guarded by
 * FLAG_ECRED_CONN_REQ_SENT); deferred channels wait for their trigger.
 */
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_conn_data data;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	memset(&data, 0, sizeof(data));
	data.pdu.req.psm = chan->psm;
	data.pdu.req.mtu = cpu_to_le16(chan->imtu);
	data.pdu.req.mps = cpu_to_le16(chan->mps);
	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.scid[0] = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	data.count = 1;
	data.chan = chan;
	data.pid = chan->ops->get_peer_pid(chan);

	/* Collect deferred channels with the same PID/PSM */
	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);

	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
		       &data.pdu);
}
1366
l2cap_le_start(struct l2cap_chan * chan)1367 static void l2cap_le_start(struct l2cap_chan *chan)
1368 {
1369 struct l2cap_conn *conn = chan->conn;
1370
1371 if (!smp_conn_security(conn->hcon, chan->sec_level))
1372 return;
1373
1374 if (!chan->psm) {
1375 l2cap_chan_ready(chan);
1376 return;
1377 }
1378
1379 if (chan->state == BT_CONNECT) {
1380 if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1381 l2cap_ecred_connect(chan);
1382 else
1383 l2cap_le_connect(chan);
1384 }
1385 }
1386
l2cap_start_connection(struct l2cap_chan * chan)1387 static void l2cap_start_connection(struct l2cap_chan *chan)
1388 {
1389 if (chan->conn->hcon->type == LE_LINK) {
1390 l2cap_le_start(chan);
1391 } else {
1392 l2cap_send_conn_req(chan);
1393 }
1394 }
1395
/* Start the information request procedure (feature mask query) for
 * @conn unless it was already started; the info timer is armed so the
 * procedure cannot stall forever if the peer never answers.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1413
l2cap_check_enc_key_size(struct hci_conn * hcon,struct l2cap_chan * chan)1414 static bool l2cap_check_enc_key_size(struct hci_conn *hcon,
1415 struct l2cap_chan *chan)
1416 {
1417 /* The minimum encryption key size needs to be enforced by the
1418 * host stack before establishing any L2CAP connections. The
1419 * specification in theory allows a minimum of 1, but to align
1420 * BR/EDR and LE transports, a minimum of 7 is chosen.
1421 *
1422 * This check might also be called for unencrypted connections
1423 * that have no key size requirements. Ensure that the link is
1424 * actually encrypted before enforcing a key size.
1425 */
1426 int min_key_size = hcon->hdev->min_enc_key_size;
1427
1428 /* On FIPS security level, key size must be 16 bytes */
1429 if (chan->sec_level == BT_SECURITY_FIPS)
1430 min_key_size = 16;
1431
1432 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1433 hcon->enc_key_size >= min_key_size);
1434 }
1435
/* Start connection establishment for @chan once the prerequisites are
 * met: LE links take the LE path immediately; BR/EDR links first
 * complete the information request procedure and the security check,
 * then either start the connection or arm the disconnect timer when
 * the encryption key is too short.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* Feature mask must be known before any connect request */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	if (l2cap_check_enc_key_size(conn->hcon, chan))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
1462
/* Non-zero when @mode (ERTM or streaming) is supported by both the
 * local feature mask (honouring the disable_ertm module option) and
 * the remote @feat_mask; zero for any other mode.
 */
static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
{
	u32 local = l2cap_feat_mask;

	if (!disable_ertm)
		local |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;

	if (mode == L2CAP_MODE_ERTM)
		return L2CAP_FEAT_ERTM & feat_mask & local;
	if (mode == L2CAP_MODE_STREAMING)
		return L2CAP_FEAT_STREAMING & feat_mask & local;

	return 0x00;
}
1478
/* Send a Disconnection Request for @chan and move it to BT_DISCONN
 * with @err as the reason. ERTM timers are stopped first so no
 * retransmission or ack work races with the teardown.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1500
1501 /* ---- L2CAP connections ---- */
/* Walk all channels on @conn and drive their state machines now that
 * the connection-level prerequisites allow it: connectionless channels
 * are marked ready, BT_CONNECT channels are started (or closed when
 * the mode is unsupported or the encryption key too short), and
 * BT_CONNECT2 channels are answered with a Connection Response,
 * possibly pending on authorization/authentication.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	/* _safe: chan may be removed from the list when closed below */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* State-2 devices cannot fall back when the
			 * requested mode is unsupported by the remote.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon, chan))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only send the first Configure Request after a
			 * successful connect response.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}
}
1577
/* LE-specific post-connect setup: trigger pairing for outgoing
 * connections and, as peripheral, request a connection parameter
 * update if the current interval falls outside the configured range.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1610
/* Called when the underlying HCI link is usable: starts the info
 * request on ACL links, drives each channel's setup under the
 * connection lock, performs LE-specific work and finally releases RX
 * packets that were queued while the connection was being set up.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed channels only become ready once the
			 * feature mask is known.
			 */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process any data that arrived before the conn was ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1646
/* Notify sockets that we cannot guarantee reliability anymore */
l2cap_conn_unreliable(struct l2cap_conn * conn,int err)1648 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1649 {
1650 struct l2cap_chan *chan;
1651
1652 BT_DBG("conn %p", conn);
1653
1654 list_for_each_entry(chan, &conn->chan_l, list) {
1655 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1656 l2cap_chan_set_err(chan, err);
1657 }
1658 }
1659
/* Info request timed out: mark the procedure as done anyway so waiting
 * channels are not stuck forever, then restart connection processing.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	mutex_lock(&conn->lock);
	l2cap_conn_start(conn);
	mutex_unlock(&conn->lock);
}
1672
1673 /*
1674 * l2cap_user
1675 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1676 * callback is called during registration. The ->remove callback is called
1677 * during unregistration.
1678 * An l2cap_user object can either be explicitly unregistered or when the
1679 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1680 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1681 * External modules must own a reference to the l2cap_conn object if they intend
1682 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1683 * any time if they don't.
1684 */
1685
/* Register an l2cap_user on @conn; its ->probe callback is invoked
 * under the hci_dev lock. Returns 0 on success, -EINVAL if the user
 * is already registered, -ENODEV if the connection has been deleted,
 * or the error returned by ->probe.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1723
l2cap_unregister_user(struct l2cap_conn * conn,struct l2cap_user * user)1724 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1725 {
1726 struct hci_dev *hdev = conn->hcon->hdev;
1727
1728 hci_dev_lock(hdev);
1729
1730 if (list_empty(&user->list))
1731 goto out_unlock;
1732
1733 list_del_init(&user->list);
1734 user->remove(conn, user);
1735
1736 out_unlock:
1737 hci_dev_unlock(hdev);
1738 }
1739 EXPORT_SYMBOL(l2cap_unregister_user);
1740
l2cap_unregister_all_users(struct l2cap_conn * conn)1741 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1742 {
1743 struct l2cap_user *user;
1744
1745 while (!list_empty(&conn->users)) {
1746 user = list_first_entry(&conn->users, struct l2cap_user, list);
1747 list_del_init(&user->list);
1748 user->remove(conn, user);
1749 }
1750 }
1751
/* Tear down the L2CAP state of @hcon: flush queued RX, detach all
 * users, close every channel with @err, drop the HCI channel and
 * finally release the connection reference. No-op if the hcon has no
 * l2cap_data.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	mutex_lock(&conn->lock);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	cancel_delayed_work_sync(&conn->id_addr_timer);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so it survives l2cap_chan_del() for
		 * the close callback below.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hci_chan_del(conn->hchan);
	conn->hchan = NULL;

	hcon->l2cap_data = NULL;
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
}
1805
/* kref release callback: drop the hci_conn reference held by the
 * l2cap_conn and free the connection object itself.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1813
/* Take a reference on @conn; returns @conn for call chaining. */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
1820
/* Drop a reference on @conn, freeing it via l2cap_conn_free() when the
 * last reference goes away.
 */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1826
1827 /* ---- Socket interface ---- */
1828
1829 /* Find socket with psm and source / destination bdaddr.
1830 * Returns closest match.
1831 */
/* Find a channel in the global list matching @state (0 matches any),
 * @psm, source/destination address and link type. An exact address
 * match wins immediately; otherwise the closest wildcard (BDADDR_ANY)
 * match is returned. The returned channel has a reference held via
 * l2cap_chan_hold_unless_zero(), or NULL if no live match exists.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* The channel's address type must match the link type */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				/* Skip channels already going away */
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1882
/* ERTM monitor timer expiry: feed L2CAP_EV_MONITOR_TO into the TX
 * state machine. The channel reference held for this work item is
 * dropped on every exit path.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel may already be detached from its connection */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1903
/* ERTM retransmission timer expiry: feed L2CAP_EV_RETRANS_TO into the
 * TX state machine. The channel reference held for this work item is
 * dropped on every exit path.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel may already be detached from its connection */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1923
/* Transmit all queued frames in streaming mode: each frame is stamped
 * with the next TX sequence number, its control field is packed in
 * place and an FCS appended when enabled, then it is sent immediately.
 * Frames are dequeued and not kept — streaming mode does not
 * retransmit.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* No acknowledgements in streaming mode */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1959
/* Send as many new I-frames as the remote TX window allows in ERTM
 * mode. Each frame is finalised in place (F-bit, reqseq, txseq,
 * control field, FCS) and a clone is transmitted while the original
 * stays on tx_q for potential retransmission. Returns the number of
 * frames sent, 0 when the remote is busy, or -ENOTCONN when the
 * channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Piggy-back a pending F-bit onto this I-frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* I-frames implicitly acknowledge up to buffer_seq */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head, or mark the queue drained */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2026
/* Retransmit every frame currently queued on the retrans_list. Each
 * frame's retry count is bumped and checked against max_tx (exceeding
 * it disconnects the channel); the control field and FCS of the copy
 * being sent are refreshed with the current reqseq and a possible
 * F-bit. Skipped entirely while the remote is busy.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		/* max_tx == 0 means unlimited retransmissions */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the acknowledgement and F-bit on the resend */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
					   L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2104
/* Retransmit the single I-frame identified by @control->reqseq by
 * queueing it on the retrans_list and flushing the list.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2113
/* Retransmit all unacked I-frames starting at @control->reqseq. A
 * poll in @control schedules an F-bit for the first resent frame.
 * Nothing is resent while the remote is busy or when no frames are
 * outstanding.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Rebuild the retransmit list from scratch */
	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip forward to the frame matching reqseq (or the
		 * first not-yet-sent frame, whichever comes first).
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything up to the unsent portion */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2147
/* Acknowledge received I-frames, choosing the cheapest mechanism:
 *
 *  - If we are locally busy (and still in plain RECV state), send an
 *    RNR S-frame so the peer stops transmitting.
 *  - Otherwise, first try to piggyback the ack on pending outgoing
 *    I-frames.  If unacked frames remain and the receive window is at
 *    least 3/4 full, send an explicit RR now; if fewer frames are
 *    pending, just (re)arm the ack timer and ack later.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	/* Number of received frames the peer hasn't seen an ack for yet */
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2197
/* Copy user data from a msghdr iterator into @skb, allocating
 * continuation fragments (chained via frag_list) for anything that
 * does not fit in the first buffer.
 *
 * @len:   total bytes still to copy from the iterator
 * @count: bytes to place in the head skb (caller-computed from the MTU)
 *
 * Returns the number of bytes copied, or a negative errno.  On partial
 * failure the caller owns @skb (with any fragments already linked) and
 * is expected to free it.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		/* Each fragment is bounded by the HCI connection MTU */
		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link before copying so the fragment is reachable (and
		 * thus freed with the head skb) even if the copy fails.
		 */
		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
				   &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len -= count;

		/* Keep the head skb's accounting in sync with the chain */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2241
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by
 * the 2-byte PSM, then the user payload copied from @msg.
 *
 * Returns the skb on success or an ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* First buffer holds at most one MTU minus the headers */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* Length field covers the PSM plus the payload */
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2273
/* Build a basic-mode (B-frame) PDU: plain L2CAP header plus the user
 * payload copied from @msg.
 *
 * Returns the skb on success or an ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	/* First buffer holds at most one MTU minus the L2CAP header */
	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2303
/* Build an ERTM/streaming-mode I-frame PDU.
 *
 * Layout: L2CAP header, control field (16 or 32 bit, filled in at send
 * time), optional 2-byte SDU length (only on the first segment of a
 * segmented SDU, i.e. @sdulen != 0), payload, and room for an FCS if
 * the channel uses CRC16.
 *
 * Returns the skb on success or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Header size depends on enhanced vs extended control field */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* PDU length includes control/SDULEN/FCS overhead, not the header */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Record per-skb FCS mode and reset the retry counter */
	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2357
/* Segment an SDU from @msg into a chain of ERTM/streaming I-frame PDUs
 * appended to @seg_queue, tagging each with the appropriate SAR value
 * (UNSEGMENTED, or START/CONTINUE/END).
 *
 * Returns 0 on success or a negative errno; on failure @seg_queue is
 * purged.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* The whole SDU fits in one PDU: no SDU length field */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First segment carries the total SDU length */
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the first segment includes the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2423
/* Build an LE credit-based flow control PDU: L2CAP header, optional
 * 2-byte SDU length on the first segment (@sdulen != 0), then payload.
 *
 * Returns the skb on success or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* Length covers the optional SDULEN field plus the payload */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2466
/* Segment an SDU into LE credit-based flow control PDUs appended to
 * @seg_queue.  The first PDU carries the total SDU length and therefore
 * holds L2CAP_SDULEN_SIZE fewer payload bytes than the rest.
 *
 * Returns 0 on success or a negative errno; on failure @seg_queue is
 * purged.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	/* First segment: leave room for the SDU length field */
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* Subsequent segments have no SDULEN field, so
			 * they can carry the full remote MPS of payload.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2502
l2cap_le_flowctl_send(struct l2cap_chan * chan)2503 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2504 {
2505 int sent = 0;
2506
2507 BT_DBG("chan %p", chan);
2508
2509 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2510 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2511 chan->tx_credits--;
2512 sent++;
2513 }
2514
2515 BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2516 skb_queue_len(&chan->tx_q));
2517 }
2518
/* Attach TX timestamping info to @skb.  Stream sockets report the full
 * byte count @len; all other socket types count a single unit per skb.
 */
static void l2cap_tx_timestamp(struct sk_buff *skb,
			       const struct sockcm_cookie *sockc,
			       size_t len)
{
	struct sock *sk = skb ? skb->sk : NULL;
	size_t tx_len = (sk && sk->sk_type == SOCK_STREAM) ? len : 1;

	hci_setup_tx_timestamp(skb, tx_len, sockc);
}
2530
/* Timestamp a segmented SDU sitting in @queue.  For stream sockets the
 * timestamp goes on the final segment (completion of the whole SDU);
 * otherwise it goes on the first one.
 */
static void l2cap_tx_timestamp_seg(struct sk_buff_head *queue,
				   const struct sockcm_cookie *sockc,
				   size_t len)
{
	struct sk_buff *first = skb_peek(queue);
	struct sock *sk = first ? first->sk : NULL;
	struct sk_buff *target;

	if (sk && sk->sk_type == SOCK_STREAM)
		target = skb_peek_tail(queue);
	else
		target = first;

	l2cap_tx_timestamp(target, sockc, len);
}
2543
/* Send @len bytes from @msg on @chan, dispatching on channel type and
 * mode (connectionless, LE/extended flow control, basic, ERTM or
 * streaming).
 *
 * Returns the number of bytes accepted for transmission, or a negative
 * errno (-ENOTCONN, -EMSGSIZE, -EBADFD, or an allocation/copy error).
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    const struct sockcm_cookie *sockc)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_tx_timestamp(skb, sockc, len);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have been closed while segmenting (the
		 * allocation above can block); don't queue stale data.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		l2cap_tx_timestamp_seg(&seg_queue, sockc, len);

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: tell the socket layer to stop writing
		 * until the peer grants more.
		 */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_tx_timestamp(skb, sockc, len);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM) {
			/* TODO: ERTM mode timestamping */
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		} else {
			l2cap_tx_timestamp_seg(&seg_queue, sockc, len);
			l2cap_streaming_send(chan, &seg_queue);
		}

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2657
/* Send SREJ S-frames for every missing sequence number between the
 * expected TX sequence and @txseq (the out-of-order frame just
 * received), recording each one on the SREJ list.  Frames already
 * buffered out-of-order in srej_q are not re-requested.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	/* Everything up to and including txseq is now accounted for */
	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2680
/* Re-send an SREJ for the most recently requested sequence number
 * (the tail of the SREJ list).  No-op if the list is empty.
 */
static void l2cap_send_srej_tail(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
		return;

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;
	control.reqseq = chan->srej_list.tail;
	l2cap_send_sframe(chan, &control);
}
2696
/* Re-send SREJ frames for every outstanding sequence number on the
 * SREJ list except @txseq (which has just arrived).  Each entry is
 * popped, re-requested, and re-appended, so the list ends up with the
 * same contents minus @txseq.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2722
/* Process an acknowledgement: the peer has received everything up to
 * (but not including) @reqseq.  Drop the acknowledged frames from the
 * TX queue, advance expected_ack_seq, and stop the retransmission
 * timer once nothing remains unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing to do if nothing is outstanding or this is a duplicate ack */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2754
/* Abandon the SREJ_SENT receive state: forget all pending selective
 * rejects and buffered out-of-order frames, rewind the expected TX
 * sequence to the last in-order point, and return to plain RECV.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2764
/* ERTM TX state machine handler for the XMIT state (normal
 * transmission).  Events that poll the peer (explicit poll,
 * retransmission timeout) move the channel to WAIT_F.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* New data from the caller: splice it onto the TX queue
		 * and transmit whatever the window allows.
		 */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* This sends RNR while locally busy */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the peer we were busy: poll with RR so
			 * it resumes, and wait for the final response.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer expired: poll the peer */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2836
/* ERTM TX state machine handler for the WAIT_F state (a poll has been
 * sent; waiting for a frame with the F bit set).  New data is queued
 * but not transmitted.  Receiving the F bit stops the monitor timer
 * and returns the channel to XMIT; repeated monitor timeouts beyond
 * max_tx abort the connection.
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* This sends RNR while locally busy */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the peer we were busy: poll with RR so
			 * it resumes, and restart the monitor cycle.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		fallthrough;

	case L2CAP_EV_RECV_FBIT:
		if (control && control->final) {
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			/* Format fixed: was the malformed "0x2.2%x" */
			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
		}
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_EV_MONITOR_TO:
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
			l2cap_send_disconn_req(chan, ECONNABORTED);
		}
		break;
	default:
		break;
	}
}
2913
/* Dispatch a TX state-machine event to the handler for the channel's
 * current TX state; events arriving in any other state are dropped.
 */
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
	       chan, control, skbs, event, chan->tx_state);

	if (chan->tx_state == L2CAP_TX_STATE_XMIT)
		l2cap_tx_state_xmit(chan, control, skbs, event);
	else if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
		l2cap_tx_state_wait_f(chan, control, skbs, event);
	/* Any other state: ignore the event */
}
2932
/* Feed a received frame's reqseq/F-bit into the TX state machine so
 * acknowledged frames are released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2939
/* Feed only a received frame's F bit into the TX state machine
 * (no reqseq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2946
2947 /* Copy frame to all raw sockets on that connection */
l2cap_raw_recv(struct l2cap_conn * conn,struct sk_buff * skb)2948 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2949 {
2950 struct sk_buff *nskb;
2951 struct l2cap_chan *chan;
2952
2953 BT_DBG("conn %p", conn);
2954
2955 list_for_each_entry(chan, &conn->chan_l, list) {
2956 if (chan->chan_type != L2CAP_CHAN_RAW)
2957 continue;
2958
2959 /* Don't send frame to the channel it came from */
2960 if (bt_cb(skb)->l2cap.chan == chan)
2961 continue;
2962
2963 nskb = skb_clone(skb, GFP_KERNEL);
2964 if (!nskb)
2965 continue;
2966 if (chan->ops->recv(chan, nskb))
2967 kfree_skb(nskb);
2968 }
2969 }
2970
2971 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header (signalling CID chosen
 * by link type), command header (@code, @ident, @dlen), and @dlen bytes
 * of @data, fragmented across the connection MTU via frag_list if
 * necessary.
 *
 * Returns the skb, or NULL on allocation failure or an MTU too small
 * to carry even the headers.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the remainder of the first buffer with data */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain, fragments included */
	kfree_skb(skb);
	return NULL;
}
3037
l2cap_get_conf_opt(void ** ptr,int * type,int * olen,unsigned long * val)3038 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3039 unsigned long *val)
3040 {
3041 struct l2cap_conf_opt *opt = *ptr;
3042 int len;
3043
3044 len = L2CAP_CONF_OPT_SIZE + opt->len;
3045 *ptr += len;
3046
3047 *type = opt->type;
3048 *olen = opt->len;
3049
3050 switch (opt->len) {
3051 case 1:
3052 *val = *((u8 *) opt->val);
3053 break;
3054
3055 case 2:
3056 *val = get_unaligned_le16(opt->val);
3057 break;
3058
3059 case 4:
3060 *val = get_unaligned_le32(opt->val);
3061 break;
3062
3063 default:
3064 *val = (unsigned long) opt->val;
3065 break;
3066 }
3067
3068 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
3069 return len;
3070 }
3071
l2cap_add_conf_opt(void ** ptr,u8 type,u8 len,unsigned long val,size_t size)3072 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3073 {
3074 struct l2cap_conf_opt *opt = *ptr;
3075
3076 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3077
3078 if (size < L2CAP_CONF_OPT_SIZE + len)
3079 return;
3080
3081 opt->type = type;
3082 opt->len = len;
3083
3084 switch (len) {
3085 case 1:
3086 *((u8 *) opt->val) = val;
3087 break;
3088
3089 case 2:
3090 put_unaligned_le16(val, opt->val);
3091 break;
3092
3093 case 4:
3094 put_unaligned_le32(val, opt->val);
3095 break;
3096
3097 default:
3098 memcpy(opt->val, (void *) val, len);
3099 break;
3100 }
3101
3102 *ptr += L2CAP_CONF_OPT_SIZE + len;
3103 }
3104
l2cap_add_opt_efs(void ** ptr,struct l2cap_chan * chan,size_t size)3105 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3106 {
3107 struct l2cap_conf_efs efs;
3108
3109 switch (chan->mode) {
3110 case L2CAP_MODE_ERTM:
3111 efs.id = chan->local_id;
3112 efs.stype = chan->local_stype;
3113 efs.msdu = cpu_to_le16(chan->local_msdu);
3114 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3115 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3116 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3117 break;
3118
3119 case L2CAP_MODE_STREAMING:
3120 efs.id = 1;
3121 efs.stype = L2CAP_SERV_BESTEFFORT;
3122 efs.msdu = cpu_to_le16(chan->local_msdu);
3123 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3124 efs.acc_lat = 0;
3125 efs.flush_to = 0;
3126 break;
3127
3128 default:
3129 return;
3130 }
3131
3132 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3133 (unsigned long) &efs, size);
3134 }
3135
/* Deferred-ack work handler: if frames are still unacknowledged when
 * the ack timer fires, send an RR/RNR to acknowledge them.
 *
 * Drops the channel reference taken when the work was scheduled
 * (presumably by __set_ack_timer — TODO confirm against the scheduler).
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3155
/* Initialize per-channel sequencing/reassembly state.  The common part
 * runs for every mode; the ERTM-specific part additionally sets the
 * RX/TX state machines and allocates the SREJ and retransmission
 * sequence lists.
 *
 * Returns 0 on success or a negative errno from list allocation (on
 * failure of the second list, the first is freed again).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3191
/* Keep the requested ERTM/streaming mode only if the remote's feature
 * mask supports it; otherwise fall back to basic mode.
 */
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
{
	bool wants_enhanced = (mode == L2CAP_MODE_STREAMING ||
			       mode == L2CAP_MODE_ERTM);

	if (wants_enhanced && l2cap_mode_supported(mode, remote_feat_mask))
		return mode;

	return L2CAP_MODE_BASIC;
}
3204
/* True if the remote advertised Extended Window Size support */
static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
{
	return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW);
}
3209
/* True if the remote advertised Extended Flow Specification support */
static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
{
	return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW);
}
3214
/* Fill the RFC option with the default ERTM retransmission and monitor
 * timeouts (LE-encoded).  @chan is currently unused here.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
}
3221
/* Choose the TX window size and control-field format.  A window larger
 * than the default requires the extended (32-bit) control field, which
 * the remote must support; otherwise clamp to the default window.
 */
static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
{
	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
	    __l2cap_ews_supported(chan->conn)) {
		/* use extended control field */
		set_bit(FLAG_EXT_CTRL, &chan->flags);
		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
	} else {
		chan->tx_win = min_t(u16, chan->tx_win,
				     L2CAP_DEFAULT_TX_WINDOW);
		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	}
	chan->ack_win = chan->tx_win;
}
3236
l2cap_mtu_auto(struct l2cap_chan * chan)3237 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3238 {
3239 struct hci_conn *conn = chan->conn->hcon;
3240
3241 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3242
3243 /* The 2-DH1 packet has between 2 and 56 information bytes
3244 * (including the 2-byte payload header)
3245 */
3246 if (!(conn->pkt_type & HCI_2DH1))
3247 chan->imtu = 54;
3248
3249 /* The 3-DH1 packet has between 2 and 85 information bytes
3250 * (including the 2-byte payload header)
3251 */
3252 if (!(conn->pkt_type & HCI_3DH1))
3253 chan->imtu = 83;
3254
3255 /* The 2-DH3 packet has between 2 and 369 information bytes
3256 * (including the 2-byte payload header)
3257 */
3258 if (!(conn->pkt_type & HCI_2DH3))
3259 chan->imtu = 367;
3260
3261 /* The 3-DH3 packet has between 2 and 554 information bytes
3262 * (including the 2-byte payload header)
3263 */
3264 if (!(conn->pkt_type & HCI_3DH3))
3265 chan->imtu = 552;
3266
3267 /* The 2-DH5 packet has between 2 and 681 information bytes
3268 * (including the 2-byte payload header)
3269 */
3270 if (!(conn->pkt_type & HCI_2DH5))
3271 chan->imtu = 679;
3272
3273 /* The 3-DH5 packet has between 2 and 1023 information bytes
3274 * (including the 2-byte payload header)
3275 */
3276 if (!(conn->pkt_type & HCI_3DH5))
3277 chan->imtu = 1021;
3278 }
3279
/* Build a Configuration Request for @chan into @data (at most @data_size
 * bytes) and return the number of bytes written.
 *
 * On the very first request the channel mode is (re)selected against the
 * remote feature mask; afterwards MTU, RFC, EFS, EWS and FCS options are
 * appended depending on the chosen mode.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode is only negotiated before any config exchange took place */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		/* Fall back to a mode both sides support */
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only send an MTU option when deviating from the default */
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* No RFC option needed when the peer supports neither ERTM
		 * nor streaming mode.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Clamp PDU size so a full PDU (with extended header, SDU
		 * length and FCS) still fits into the ACL MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		/* Window sizes above the RFC limit go in a separate EWS
		 * option.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3405
/* Parse the Configuration Request accumulated in chan->conf_req and build
 * the corresponding Configuration Response into @data (at most @data_size
 * bytes).  Returns the response length, or -ECONNREFUSED when the request
 * must be rejected outright (disconnecting the channel).
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = 0;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: collect the peer's options.  Options with an
	 * unexpected length are silently skipped.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			return -ECONNREFUSED;

		default:
			/* Unknown hints may be ignored; unknown non-hint
			 * options are echoed back with CONF_UNKNOWN.
			 */
			if (hint)
				break;
			result = L2CAP_CONF_UNKNOWN;
			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
			break;
		}
	}

	/* Mode is only re-negotiated on the first exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Still no agreement on mode after a full round trip:
		 * give up.
		 */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		/* If MTU is not provided in configure request, use the most recently
		 * explicitly or implicitly accepted value for the other direction,
		 * or the default value.
		 */
		if (mtu == 0)
			mtu = chan->imtu ? chan->imtu : L2CAP_DEFAULT_MTU;

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the peer's PDU size to what fits our ACL
			 * MTU after headers and FCS.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (remote_efs &&
			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0);

	return ptr - data;
}
3632
/* Process a Configuration Response (@rsp, @len bytes of options) and build
 * a follow-up Configuration Request into @data (at most @size bytes).
 * *@result may be updated along the way; returns the request length, or
 * -ECONNREFUSED when the response is unacceptable.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	/* Walk the peer's options; options with an unexpected length are
	 * silently skipped.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* A basic-mode channel cannot be switched to another mode by the
	 * peer's response.
	 */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3750
/* Write a minimal Configuration Response (header only, no options) into
 * @data and return its length.
 */
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
				u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *opts = rsp->data;

	BT_DBG("chan %p", chan);

	/* Echo the peer's source CID (our dcid) with the given result */
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(flags);

	return opts - data;
}
3765
__l2cap_le_connect_rsp_defer(struct l2cap_chan * chan)3766 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3767 {
3768 struct l2cap_le_conn_rsp rsp;
3769 struct l2cap_conn *conn = chan->conn;
3770
3771 BT_DBG("chan %p", chan);
3772
3773 rsp.dcid = cpu_to_le16(chan->scid);
3774 rsp.mtu = cpu_to_le16(chan->imtu);
3775 rsp.mps = cpu_to_le16(chan->mps);
3776 rsp.credits = cpu_to_le16(chan->rx_credits);
3777 rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3778
3779 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3780 &rsp);
3781 }
3782
/* Per-channel callback that tallies deferred enhanced-credit channels:
 * increments *data for each channel still pending accept, or sets it to
 * -ECONNREFUSED when a channel ended up neither connected nor pending.
 */
static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
{
	int *result = data;

	/* Stop once a verdict was reached, and skip channels that sent the
	 * connection request themselves.
	 */
	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	if (chan->state == BT_CONNECT2) {
		/* Still pending accept: count it */
		(*result)++;
	} else if (chan->state != BT_CONNECTED) {
		/* Not connected and not pending accept: refused */
		*result = -ECONNREFUSED;
	}
}
3803
/* Scratch buffer used to assemble a deferred L2CAP_ECRED_CONN_RSP PDU. */
struct l2cap_ecred_rsp_data {
	struct {
		struct l2cap_ecred_conn_rsp_hdr rsp;
		/* CID list appended right after the fixed header */
		__le16 scid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	int count;	/* number of CIDs filled in so far */
};
3811
/* Per-channel callback used by __l2cap_ecred_conn_rsp_defer(): append the
 * channel's CID to the pending enhanced-credit connection response, or
 * delete the channel when the overall result is a failure.
 */
static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_rsp_data *rsp = data;
	/* Re-view the packed header + CID array as the flexible-array
	 * l2cap_ecred_conn_rsp layout so dcid[] indexing is well defined.
	 */
	struct l2cap_ecred_conn_rsp *rsp_flex =
		container_of(&rsp->pdu.rsp, struct l2cap_ecred_conn_rsp, hdr);

	/* Check if channel for outgoing connection or if it wasn't deferred
	 * since in those cases it must be skipped.
	 */
	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags) ||
	    !test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	/* Reset ident so only one response is sent */
	chan->ident = 0;

	/* Include all channels pending with the same ident */
	if (!rsp->pdu.rsp.result)
		rsp_flex->dcid[rsp->count++] = cpu_to_le16(chan->scid);
	else
		l2cap_chan_del(chan, ECONNRESET);
}
3834
/* Send the deferred Enhanced Credit Based Connection Response covering all
 * channels that share @chan's command ident.  Nothing is sent while any of
 * those channels is still pending accept.
 */
void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_rsp_data data;
	u16 id = chan->ident;
	int result = 0;

	/* No ident means there is no request left to answer */
	if (!id)
		return;

	BT_DBG("chan %p id %d", chan, id);

	memset(&data, 0, sizeof(data));

	data.pdu.rsp.mtu = cpu_to_le16(chan->imtu);
	data.pdu.rsp.mps = cpu_to_le16(chan->mps);
	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);

	/* Verify that all channels are ready */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);

	/* result > 0: at least one channel still pending accept */
	if (result > 0)
		return;

	if (result < 0)
		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);

	/* Build response */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);

	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
		       &data.pdu);
}
3870
__l2cap_connect_rsp_defer(struct l2cap_chan * chan)3871 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3872 {
3873 struct l2cap_conn_rsp rsp;
3874 struct l2cap_conn *conn = chan->conn;
3875 u8 buf[128];
3876 u8 rsp_code;
3877
3878 rsp.scid = cpu_to_le16(chan->dcid);
3879 rsp.dcid = cpu_to_le16(chan->scid);
3880 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3881 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3882 rsp_code = L2CAP_CONN_RSP;
3883
3884 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3885
3886 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3887
3888 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3889 return;
3890
3891 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3892 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3893 chan->num_conf_req++;
3894 }
3895
/* Extract the agreed RFC (and extended window size) parameters from a
 * successful Configuration Response and apply them to @chan.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	/* Only ERTM and streaming mode use RFC parameters */
	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* With the extended control field the EWS option carries
		 * the window; otherwise the RFC txwin_size does.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3951
/* Handle an incoming Command Reject.  A "command not understood" reject of
 * our pending Information Request is treated as the feature exchange being
 * finished, unblocking connection startup.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
		return 0;

	/* Only react if the reject matches our outstanding info request */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) ||
	    cmd->ident != conn->info_ident)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);

	return 0;
}
3976
/* Handle an incoming Connection Request: look up a listening channel for
 * the requested PSM, run security and CID validation, create the new
 * channel, and answer with a Connection Response (@rsp_code).
 */
static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
			  u8 *data, u8 rsp_code)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    (!hci_conn_check_link_mode(conn->hcon) ||
	     !l2cap_check_enc_key_size(conn->hcon, pchan))) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	/* Default failure result unless a more specific one applies */
	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm = psm;
	chan->dcid = scid;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	/* Before the feature-mask exchange finished the connection can at
	 * most be reported as pending.
	 */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Without a parent channel there is nothing left to do */
	if (!pchan)
		return;

	/* Pending with no info yet: start the feature-mask exchange */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* Successful connect: immediately send our Config Request */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);
}
4107
/* Signaling entry point for L2CAP_CONN_REQ: validate the length and hand
 * off to the common connect handler.
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	/* The request must at least carry the PSM and source CID */
	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP);

	return 0;
}
4117
/* Handle a Connection Response from the peer: locate the matching channel
 * (by our source CID or by the command ident for pending requests) and
 * transition it according to the reported result.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	/* A successful response must assign us a dynamic destination CID */
	if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EBADSLT;
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EBADSLT;
	}

	/* Take a reference; skip channels already being torn down */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Reject a dcid that is already in use on this link */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the initial Config Request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return err;
}
4195
set_default_fcs(struct l2cap_chan * chan)4196 static inline void set_default_fcs(struct l2cap_chan *chan)
4197 {
4198 /* FCS is enabled only in ERTM or streaming mode, if one or both
4199 * sides request it.
4200 */
4201 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4202 chan->fcs = L2CAP_FCS_NONE;
4203 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4204 chan->fcs = L2CAP_FCS_CRC16;
4205 }
4206
/* Resolve a locally-pending EFS configuration by sending a successful
 * Configuration Response built into @data.
 */
static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
{
	struct l2cap_conn *conn = chan->conn;
	int len;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
	       flags);

	/* The local side of the configuration is settled now */
	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	len = l2cap_build_conf_rsp(chan, data, L2CAP_CONF_SUCCESS, flags);
	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP, len, data);
}
4222
/* Send a Command Reject with reason "invalid CID" carrying the offending
 * source/destination CID pair.
 */
static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
				   u16 scid, u16 dcid)
{
	struct l2cap_cmd_rej_cid rej = {
		.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID),
		.scid = __cpu_to_le16(scid),
		.dcid = __cpu_to_le16(dcid),
	};

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
4234
/* Handle an incoming Configuration Request: accumulate (possibly
 * continued) option data in chan->conf_req, and once the request is
 * complete, parse it, respond, and finish channel setup when both
 * directions are configured.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only valid in these channel states */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
		chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: bring the channel up */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
	}

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4343
/* Handle an incoming L2CAP_CONFIGURATION_RSP on a BR/EDR link.
 *
 * Depending on rsp->result this either records the accepted options,
 * re-issues an adjusted Configuration Request (UNKNOWN/UNACCEPT), waits
 * for a final response (PENDING), or tears the channel down.  When both
 * directions are configured the channel is brought up (ERTM/streaming
 * state is initialised first).
 *
 * Returns 0, or a negative errno for a malformed packet.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);	/* length of the option payload */
	int err = 0;

	/* Reject packets too short to carry the fixed response header */
	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Lookup returns the channel locked and with a reference held;
	 * every exit below goes through done: to drop both.
	 */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		/* If we also answered PENDING locally, parse the remote's
		 * options now and send our (EFS) configuration response.
		 */
		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		/* Retry with adjusted options, but only a bounded number of
		 * times to avoid an endless config ping-pong with the peer.
		 */
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			/* Rebuilt request must fit in the local buffer */
			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		/* Unrecoverable result (or retry budget exhausted): fail the
		 * channel and start the disconnect handshake.
		 */
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More option fragments are coming; config not done yet */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: finish bringing the channel up */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4450
/* Handle L2CAP_DISCONNECTION_REQ: acknowledge with a Disconnection
 * Response and tear down the matching local channel.
 *
 * Returns 0, or -EPROTO for a malformed packet (which makes the caller
 * send a Command Reject).
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's DCID names our local (source) CID.  Lookup returns the
	 * channel locked and with a reference held.
	 */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	/* Echo the CID pair back from our point of view */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	/* Mark the socket as shutting down before removing the channel so
	 * userspace sees a consistent state.
	 */
	chan->ops->set_shutdown(chan);

	l2cap_chan_del(chan, ECONNRESET);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4489
/* Handle L2CAP_DISCONNECTION_RSP: complete a disconnect we initiated.
 *
 * Only acts on channels that are actually in BT_DISCONN; a response for
 * a channel in any other state is silently ignored.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* Lookup returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		return 0;
	}

	/* Unsolicited or stale response: drop lock/ref and ignore */
	if (chan->state != BT_DISCONN) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return 0;
	}

	/* Error 0: this is the expected, clean end of a local disconnect */
	l2cap_chan_del(chan, 0);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4526
/* Handle L2CAP_INFORMATION_REQ: report our feature mask or fixed-channel
 * map, or L2CAP_IR_NOTSUPP for any other info type.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	switch (type) {
	case L2CAP_IT_FEAT_MASK: {
		/* 4-byte header + 32-bit feature mask */
		u8 buf[8];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		u32 feat_mask = l2cap_feat_mask;

		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);

		/* ERTM, streaming and FCS are only advertised when ERTM
		 * support has not been disabled via module parameter.
		 */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING |
				     L2CAP_FEAT_FCS;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
		break;
	}
	case L2CAP_IT_FIXED_CHAN: {
		/* 4-byte header + 8-byte fixed channel bitmap */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Only the first octet carries flags; the rest is zero */
		rsp->data[0] = conn->local_fixed_chan;
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
		break;
	}
	default: {
		struct l2cap_info_rsp rsp;

		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
		break;
	}
	}

	return 0;
}
4574
/* Handle L2CAP_INFORMATION_RSP for the info exchange we started at
 * connection setup.  After learning the feature mask we may chain a
 * second request for the fixed channel map; once the exchange is done
 * (or failed) pending channels are started via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	/* A valid response arrived; stop the info-request timeout */
	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused the query: give up on the exchange and let
		 * the pending channels proceed without feature info.
		 */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels: ask for its map
			 * before declaring the exchange done.
			 */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		/* Only the first octet of the 8-byte map carries flags */
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4637
/* Handle L2CAP_CONNECTION_PARAMETER_UPDATE_REQ from an LE slave.
 *
 * Only valid when we are the master of the link.  The parameters are
 * validated, a response is always sent, and accepted parameters are
 * applied to the controller and reported to the management interface.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req =
		(struct l2cap_conn_param_update_req *) data;
	struct l2cap_conn_param_update_rsp rsp = { 0 };
	u16 min, max, latency, to_multiplier;
	int err;

	/* Only the master may grant a parameter update */
	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	rsp.result = err ? cpu_to_le16(L2CAP_CONN_PARAM_REJECTED) :
			   cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		/* Apply the parameters and let mgmt decide whether to
		 * persist them for future connections.
		 */
		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);
	}

	return 0;
}
4687
/* Handle L2CAP_LE_CREDIT_BASED_CONNECTION_RSP for a connect request we
 * sent.  On success the channel is completed with the peer's parameters;
 * on an authentication/encryption failure we raise our security level
 * and retry via SMP; any other result tears the channel down.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* A successful response must carry spec-minimum MTU/MPS (23) and a
	 * DCID inside the LE dynamic range.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					      dcid < L2CAP_CID_DYN_START ||
					      dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	/* Match the response to our pending request by signaling ident.
	 * NOTE(review): the lookup takes no extra channel reference here —
	 * presumably protected by the caller's connection context; confirm.
	 */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Refuse a DCID that is already assigned to another chan */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Bump the requested security one step above the current
		 * link level and let SMP re-secure the connection.
		 */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

	return err;
}
4767
/* Dispatch a single BR/EDR signaling command to its handler.
 *
 * Handlers whose failure should trigger a Command Reject propagate their
 * return value through err; responses (CONN_RSP, CONF_RSP, ...) are
 * deliberately fire-and-forget since rejecting a response is pointless.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
4826
/* Handle L2CAP_LE_CREDIT_BASED_CONNECTION_REQ: accept or refuse a new
 * LE connection-oriented channel on a registered PSM.
 *
 * Validates PSM and SCID ranges, checks link security against the
 * listening channel's requirements, and either creates the new channel
 * (possibly deferring the response to userspace) or sends a response
 * carrying the appropriate error result.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm  = req->psm;
	/* dcid/credits stay 0 in every error response */
	dcid = 0;
	credits = 0;

	/* Spec minimum MTU/MPS for LE credit based channels is 23 */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* The link must already satisfy the listener's security level */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = pchan->sec_level == BT_SECURITY_MEDIUM ?
			 L2CAP_CR_LE_ENCRYPTION : L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	/* Our freshly allocated SCID and initial credits go in the rsp */
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

	/* Deferred setup: userspace will answer later via the defer hook */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
4964
/* Handle L2CAP_FLOW_CONTROL_CREDIT_IND: add peer-granted TX credits to
 * the channel and resume any transmission that was blocked on credits.
 *
 * A credit grant that would push the total above
 * LE_FLOWCTL_MAX_CREDITS is a protocol violation and disconnects the
 * channel.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid = __le16_to_cpu(pkt->cid);
	credits = __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	/* Lookup returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	/* Reject grants that would overflow the 16-bit credit budget */
	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		goto unlock;
	}

	chan->tx_credits += credits;

	/* Resume sending */
	l2cap_le_flowctl_send(chan);

	/* Tell the owner it may queue more data if credits remain */
	if (chan->tx_credits)
		chan->ops->resume(chan);

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5011
/* Handle L2CAP_CREDIT_BASED_CONNECTION_REQ (Enhanced Credit Based
 * Flow Control Mode): accept up to L2CAP_ECRED_MAX_CID channels in a
 * single request.
 *
 * Each requested SCID is handled independently; a failure for one CID
 * leaves its dcid slot as 0 in the response while the others may still
 * succeed.  The response's result field reflects the last failure seen
 * (or success).  When the listener defers setup, no response is sent
 * here — it is sent later from the defer path.
 */
static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_req *req = (void *) data;
	DEFINE_RAW_FLEX(struct l2cap_ecred_conn_rsp, pdu, dcid, L2CAP_ECRED_MAX_CID);
	struct l2cap_chan *chan, *pchan;
	u16 mtu, mps;
	__le16 psm;
	u8 result, len = 0;
	int i, num_scid;
	bool defer = false;

	if (!enable_ecred)
		return -EINVAL;

	/* Payload must be the fixed header plus a whole number of SCIDs */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	/* Bound num_scid so the dcid[] flex array cannot be overrun */
	if (num_scid > L2CAP_ECRED_MAX_CID) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
		goto response;
	}

	psm  = req->psm;

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);

	memset(pdu, 0, sizeof(*pdu));

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* The link must already satisfy the listener's security level */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		goto unlock;
	}

	result = L2CAP_CR_LE_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid = __le16_to_cpu(req->scid[i]);

		BT_DBG("scid[%d] 0x%4.4x", i, scid);

		/* Default this slot to "refused"; overwritten on success */
		pdu->dcid[i] = 0x0000;
		len += sizeof(*pdu->dcid);

		/* Check for valid dynamic CID range */
		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
			result = L2CAP_CR_LE_INVALID_SCID;
			continue;
		}

		/* Check if we already have channel with that dcid */
		if (__l2cap_get_chan_by_dcid(conn, scid)) {
			result = L2CAP_CR_LE_SCID_IN_USE;
			continue;
		}

		chan = pchan->ops->new_connection(pchan);
		if (!chan) {
			result = L2CAP_CR_LE_NO_MEM;
			continue;
		}

		bacpy(&chan->src, &conn->hcon->src);
		bacpy(&chan->dst, &conn->hcon->dst);
		chan->src_type = bdaddr_src_type(conn->hcon);
		chan->dst_type = bdaddr_dst_type(conn->hcon);
		chan->psm  = psm;
		chan->dcid = scid;
		chan->omtu = mtu;
		chan->remote_mps = mps;

		__l2cap_chan_add(conn, chan);

		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));

		/* Init response */
		if (!pdu->credits) {
			pdu->mtu = cpu_to_le16(chan->imtu);
			pdu->mps = cpu_to_le16(chan->mps);
			pdu->credits = cpu_to_le16(chan->rx_credits);
		}

		pdu->dcid[i] = cpu_to_le16(chan->scid);

		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

		chan->ident = cmd->ident;
		chan->mode = L2CAP_MODE_EXT_FLOWCTL;

		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
			l2cap_state_change(chan, BT_CONNECT2);
			defer = true;
			chan->ops->defer(chan);
		} else {
			l2cap_chan_ready(chan);
		}
	}

unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

response:
	pdu->result = cpu_to_le16(result);

	/* Deferred setup: the response is sent from the defer path */
	if (defer)
		return 0;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
		       sizeof(*pdu) + len, pdu);

	return 0;
}
5162
/* Handle L2CAP_CREDIT_BASED_CONNECTION_RSP: complete every pending
 * channel that matches the request's signaling ident.
 *
 * Channels are matched in list order against the dcid[] array in the
 * response.  A dcid of 0 means that slot was refused.  A security
 * failure result raises our level and retries via SMP, like the
 * non-enhanced LE connect path.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	/* cmd_len now tracks the remaining length of the dcid[] array */
	cmd_len -= sizeof(*rsp);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		/* Only pending ecred channels created by this request */
		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			/* Also discard the pre-existing holder of the CID */
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* This slot succeeded: adopt the peer's parameters */
			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	return err;
}
5272
/* Handle L2CAP_CREDIT_BASED_RECONFIGURE_REQ: the peer wants to change
 * MTU/MPS for one or more enhanced-credit channels.
 *
 * Per spec the MTU may only grow; a decrease for any included channel
 * makes the whole request fail with L2CAP_RECONF_INVALID_MTU (the new
 * values are still recorded for the channels, matching the existing
 * behavior of this handler).  A single response covers all channels.
 */
static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					 u8 *data)
{
	struct l2cap_ecred_reconf_req *req = (void *) data;
	struct l2cap_ecred_reconf_rsp rsp;
	u16 mtu, mps, result;
	struct l2cap_chan *chan;
	int i, num_scid;

	if (!enable_ecred)
		return -EINVAL;

	/* Payload must be the fixed header plus a whole number of SCIDs.
	 * The modulo must apply to (cmd_len - sizeof(*req)); without the
	 * parentheses '%' binds to sizeof(*req) alone, which reduces the
	 * check to 'cmd_len - 0' and rejects every well-formed request.
	 */
	if (cmd_len < sizeof(*req) ||
	    (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto respond;
	}

	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);

	BT_DBG("mtu %u mps %u", mtu, mps);

	if (mtu < L2CAP_ECRED_MIN_MTU) {
		result = L2CAP_RECONF_INVALID_MTU;
		goto respond;
	}

	if (mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_RECONF_INVALID_MPS;
		goto respond;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);
	result = L2CAP_RECONF_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid;

		scid = __le16_to_cpu(req->scid[i]);
		if (!scid)
			return -EPROTO;

		/* Peer's SCID is our DCID; unknown CIDs are skipped */
		chan = __l2cap_get_chan_by_dcid(conn, scid);
		if (!chan)
			continue;

		/* If the MTU value is decreased for any of the included
		 * channels, then the receiver shall disconnect all
		 * included channels.
		 */
		if (chan->omtu > mtu) {
			BT_ERR("chan %p decreased MTU %u -> %u", chan,
			       chan->omtu, mtu);
			result = L2CAP_RECONF_INVALID_MTU;
		}

		chan->omtu = mtu;
		chan->remote_mps = mps;
	}

respond:
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
		       &rsp);

	return 0;
}
5343
/* Handle L2CAP_CREDIT_BASED_RECONFIGURE_RSP for a reconfigure request
 * we sent.  A non-zero result means the peer refused: every channel
 * still tagged with the request's signaling ident is torn down.
 *
 * Only the result field is read, so the conn_rsp struct layout is
 * reused here; it shares the leading result field with the reconf rsp.
 */
static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					 u8 *data)
{
	struct l2cap_chan *chan, *tmp;
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	u16 result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	result = __le16_to_cpu(rsp->result);

	/* Log the host-order value; passing the raw __le16 prints a
	 * byte-swapped result on big-endian hosts and trips sparse.
	 */
	BT_DBG("result 0x%4.4x", result);

	/* Success requires no action */
	if (!result)
		return 0;

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		if (chan->ident != cmd->ident)
			continue;

		l2cap_chan_del(chan, ECONNRESET);
	}

	return 0;
}
5371
/* Handle L2CAP_COMMAND_REJECT_RSP on an LE link: the peer did not
 * understand a request we sent, so any channel still waiting on that
 * signaling ident is torn down as refused.
 */
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	/* No matching pending request: nothing to clean up */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		goto done;

	/* Take a reference only if the channel is not already dying */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan)
		goto done;

	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, ECONNREFUSED);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

done:
	return 0;
}
5398
/* Dispatch a single LE signaling command to its handler.
 *
 * Handlers whose failure should trigger a Command Reject propagate
 * their return value through err; response handlers are deliberately
 * fire-and-forget.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_REQ:
		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_RSP:
		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_REQ:
		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_RSP:
		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5461
/* Process an skb received on the LE signaling channel.
 *
 * Unlike BR/EDR, an LE signaling PDU carries exactly one command, so
 * the length must match the header exactly.  Any handler error results
 * in a Command Reject (Not Understood).  Consumes the skb.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	/* LE signaling CID is only valid on an LE link */
	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* Exactly one command per PDU; ident 0 is reserved/invalid */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): message text predates this path being used
		 * for generic handler errors, not only link-type mismatches.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5502
/* Send a Command Reject (reason: Command Not Understood) for the
 * signaling identifier @ident.
 */
static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
{
	struct l2cap_cmd_rej_unk rej = {
		.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD),
	};

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
5510
/* Handle the BR/EDR signaling channel.
 *
 * A single ACL signaling PDU may carry several commands back to back,
 * so iterate over the skb and dispatch each complete command to
 * l2cap_bredr_sig_cmd(). Malformed commands are answered with a
 * Command Reject rather than silently dropped. The skb is always
 * consumed before returning.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	/* Mirror raw signaling traffic to listeners first */
	l2cap_raw_recv(conn, skb);

	/* This signaling channel only exists on BR/EDR links */
	if (hcon->type != ACL_LINK)
		goto drop;

	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		/* Reject a command whose claimed length overruns the PDU or
		 * whose ident is the reserved value 0, then skip as much of
		 * it as possible and keep parsing the remainder.
		 */
		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			l2cap_sig_send_rej(conn, cmd->ident);
			skb_pull(skb, len > skb->len ? skb->len : len);
			continue;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			/* NOTE(review): message text is misleading - err may
			 * come from any failed handler, not only a link type
			 * check.
			 */
			BT_ERR("Wrong link type (%d)", err);
			l2cap_sig_send_rej(conn, cmd->ident);
		}

		skb_pull(skb, len);
	}

	/* Trailing bytes too short to hold another command header */
	if (skb->len > 0) {
		BT_DBG("corrupted command");
		l2cap_sig_send_rej(conn, 0);
	}

drop:
	kfree_skb(skb);
}
5558
/* Verify the FCS (CRC-16) trailer on an ERTM/streaming mode PDU.
 *
 * The CRC covers the L2CAP control header plus the payload. The header
 * has already been pulled off @skb, so it is addressed back at
 * skb->data - hdr_size. When the channel uses FCS, the trailer is
 * trimmed off @skb as a side effect.
 *
 * Returns 0 on success (or when FCS is disabled), -EBADMSG on a CRC
 * mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	/* Header size depends on whether extended control fields are used */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim first so skb->len excludes the FCS; the trailer bytes
		 * are still in the buffer right past the new end of data.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5579
/* Answer a poll (P=1) from the peer with the F-bit set on whatever
 * frame fits best: an RNR if we are locally busy, otherwise pending
 * I-frames, otherwise a plain RR.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote busy just cleared: restart the retransmission timer if
	 * frames are still awaiting acknowledgement.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5613
/* Append @new_frag to the fragment list of the SDU head @skb and update
 * the head's length/size accounting. *@last_frag tracks the current tail
 * of the list so appends stay O(1); it is advanced to @new_frag.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5632
/* Reassemble an SDU from ERTM/streaming I-frames according to the SAR
 * bits in @control.
 *
 * Ownership: on success the skb is either delivered to the channel
 * owner or linked into the partially assembled SDU (chan->sdu); on any
 * error both the skb and any partial SDU are freed and the reassembly
 * state is reset.
 *
 * Returns 0 on success, -EINVAL on a SAR sequencing/length violation,
 * -EMSGSIZE when the announced SDU length exceeds the channel MTU, or
 * the channel owner's recv error.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* Complete SDU; invalid while reassembly is in progress */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* First fragment; invalid while reassembly is in progress */
		if (chan->sdu)
			break;

		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		/* The total SDU length is prepended to the first fragment */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must not already hold the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb is now owned by chan->sdu; don't free it below */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		/* Middle fragment; only valid during reassembly */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* err stays -EINVAL if this overran the announced length */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		/* Last fragment; only valid during reassembly */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Completed SDU must match the announced length exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop this frame plus any partial SDU, reset state */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5717
/* Re-segment queued transmit data after a channel parameter change.
 *
 * Not implemented yet; currently always reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	return 0;
}
5723
l2cap_chan_busy(struct l2cap_chan * chan,int busy)5724 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5725 {
5726 u8 event;
5727
5728 if (chan->mode != L2CAP_MODE_ERTM)
5729 return;
5730
5731 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5732 l2cap_tx(chan, NULL, NULL, event);
5733 }
5734
/* Flush in-sequence frames that were buffered in the SREJ queue.
 *
 * Delivers consecutive frames starting at buffer_seq until a gap is hit
 * or local busy is asserted. Once the queue drains completely, return
 * to the normal RECV state and acknowledge the peer.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5768
/* Handle a received SREJ S-frame: retransmit the single frame the peer
 * selectively rejected, honouring the retry limit and the SREJ_ACT
 * bookkeeping that prevents a second retransmission when a later F=1
 * frame echoes the same reqseq.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq asks for a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			/* Remember this SREJ so the matching F=1 response
			 * does not trigger a second retransmission.
			 */
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only when this F=1 frame
			 * answers the SREJ we already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5826
/* Handle a received REJ S-frame: the peer rejects all frames from
 * reqseq onward, so retransmit everything outstanding, subject to the
 * retry limit and the REJ_ACT double-retransmission guard.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq rejects a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* An F=1 REJ answering one we already acted on (REJ_ACT
		 * set) must not trigger a second full retransmission.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5863
/* Classify a received I-frame's txseq relative to the current receive
 * window: expected, duplicate, a gap (unexpected), an SREJ-related
 * case, or invalid. The result drives the RECV/SREJ_SENT rx state
 * machines.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	/* Extra cases only apply while SREJ frames are outstanding */
	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
5949
/* ERTM receive state machine, RECV state (no outstanding SREJs).
 *
 * Handles new I-frames (in-sequence delivery, or transition to
 * SREJ_SENT on a sequence gap) and RR/RNR/REJ/SREJ S-frames. Any skb
 * that was neither queued nor consumed is freed before returning.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	struct l2cap_ctrl local_control;
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu may free skb, hence invalidate
			 * control, so make a copy in advance to use it after
			 * l2cap_reassemble_sdu returns and to avoid the race
			 * condition, for example:
			 *
			 * The current thread calls:
			 *   l2cap_reassemble_sdu
			 *     chan->ops->recv == l2cap_sock_recv_cb
			 *       __sock_queue_rcv_skb
			 * Another thread calls:
			 *   bt_sock_recvmsg
			 *     skb_recv_datagram
			 *     skb_free_datagram
			 * Then the current thread tries to access control, but
			 * it was freed by skb_free_datagram.
			 */
			local_control = *control;
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (local_control.final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					local_control.final = 0;
					l2cap_retransmit_all(chan, &local_control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already delivered; still update tx bookkeeping */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6101
/* ERTM receive state machine, SREJ_SENT state: a sequence gap was
 * detected and SREJ frames are outstanding. Out-of-order frames are
 * buffered in srej_q until the missing frames arrive, then flushed via
 * l2cap_rx_queued_iframes().
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* Head of the SREJ list arrived; try to flush the
			 * now-contiguous run of buffered frames.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Answer the RNR with a plain RR ack */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6244
l2cap_finish_move(struct l2cap_chan * chan)6245 static int l2cap_finish_move(struct l2cap_chan *chan)
6246 {
6247 BT_DBG("chan %p", chan);
6248
6249 chan->rx_state = L2CAP_RX_STATE_RECV;
6250 chan->conn->mtu = chan->conn->hcon->mtu;
6251
6252 return l2cap_resegment(chan);
6253 }
6254
/* ERTM receive state machine, WAIT_P state: wait for the peer's poll
 * (P=1) S-frame while completing a channel move. Rewinds the transmit
 * side to the peer's reqseq, finishes the move, answers the poll with
 * F=1 and then processes the event in the RECV state.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	/* Only a P=1 frame is acceptable in this state */
	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames are not valid while waiting for the poll */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6292
/* ERTM receive state machine, WAIT_F state: wait for the peer's final
 * (F=1) frame answering our poll. Rewinds the transmit side to the
 * acknowledged reqseq, adopts the current link MTU, re-segments and
 * then processes the frame in the RECV state.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	/* Only an F=1 frame is acceptable in this state */
	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;
	chan->conn->mtu = chan->conn->hcon->mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6326
__valid_reqseq(struct l2cap_chan * chan,u16 reqseq)6327 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6328 {
6329 /* Make sure reqseq is for a packet that has been sent but not acked */
6330 u16 unacked;
6331
6332 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6333 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6334 }
6335
/* Dispatch a received ERTM frame/event to the handler for the current
 * rx state. A reqseq that does not acknowledge an in-flight frame is a
 * protocol violation and tears the channel down.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
{
	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (!__valid_reqseq(chan, control->reqseq)) {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return 0;
	}

	switch (chan->rx_state) {
	case L2CAP_RX_STATE_RECV:
		return l2cap_rx_state_recv(chan, control, skb, event);
	case L2CAP_RX_STATE_SREJ_SENT:
		return l2cap_rx_state_srej_sent(chan, control, skb, event);
	case L2CAP_RX_STATE_WAIT_P:
		return l2cap_rx_state_wait_p(chan, control, skb, event);
	case L2CAP_RX_STATE_WAIT_F:
		return l2cap_rx_state_wait_f(chan, control, skb, event);
	default:
		/* shut it down */
		return 0;
	}
}
6372
/* Receive an I-frame in streaming mode. There are no retransmissions:
 * an unexpected txseq simply discards any partial SDU and resynchronizes
 * to the frame after @txseq. Always returns 0; the skb is consumed.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
	 * the txseq field in advance to use it after l2cap_reassemble_sdu
	 * returns and to avoid the race condition, for example:
	 *
	 * The current thread calls:
	 *   l2cap_reassemble_sdu
	 *     chan->ops->recv == l2cap_sock_recv_cb
	 *       __sock_queue_rcv_skb
	 * Another thread calls:
	 *   bt_sock_recvmsg
	 *     skb_recv_datagram
	 *     skb_free_datagram
	 * Then the current thread tries to access control, but it was freed by
	 * skb_free_datagram.
	 */
	u16 txseq = control->txseq;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Sequence gap or duplicate: drop the frame and any
		 * partially reassembled SDU.
		 */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize to the frame following this one */
	chan->last_acked_seq = txseq;
	chan->expected_tx_seq = __next_seq(chan, txseq);

	return 0;
}
6424
/* Entry point for ERTM/streaming mode data on a channel.
 *
 * Validates the FCS and the frame length against the channel MPS, then
 * routes I-frames into the rx state machine (or the streaming receiver)
 * and S-frames to the matching RR/REJ/RNR/SREJ event. Invalid frames
 * are dropped; protocol violations disconnect the channel.
 *
 * Always returns 0; the skb is consumed on the drop paths and by the
 * state machine otherwise.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Payload length excludes the SDU length field and FCS trailer */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	/* Give the channel owner a chance to filter the frame */
	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the super field to an rx state machine event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6517
l2cap_chan_le_send_credits(struct l2cap_chan * chan)6518 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6519 {
6520 struct l2cap_conn *conn = chan->conn;
6521 struct l2cap_le_credits pkt;
6522 u16 return_credits = l2cap_le_rx_credits(chan);
6523
6524 if (chan->rx_credits >= return_credits)
6525 return;
6526
6527 return_credits -= chan->rx_credits;
6528
6529 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6530
6531 chan->rx_credits += return_credits;
6532
6533 pkt.cid = cpu_to_le16(chan->scid);
6534 pkt.credits = cpu_to_le16(return_credits);
6535
6536 chan->ident = l2cap_get_ident(conn);
6537
6538 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6539 }
6540
/* Record how much receive buffer space the channel owner can accept
 * and, on a connected channel, grant the peer matching LE credits.
 */
void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail)
{
	if (chan->rx_avail != rx_avail) {
		BT_DBG("chan %p has %zd bytes avail for rx", chan, rx_avail);

		chan->rx_avail = rx_avail;

		if (chan->state == BT_CONNECTED)
			l2cap_chan_le_send_credits(chan);
	}
}
6553
l2cap_ecred_recv(struct l2cap_chan * chan,struct sk_buff * skb)6554 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
6555 {
6556 int err;
6557
6558 BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
6559
6560 /* Wait recv to confirm reception before updating the credits */
6561 err = chan->ops->recv(chan, skb);
6562
6563 if (err < 0 && chan->rx_avail != -1) {
6564 BT_ERR("Queueing received LE L2CAP data failed");
6565 l2cap_send_disconn_req(chan, ECONNRESET);
6566 return err;
6567 }
6568
6569 /* Update credits whenever an SDU is received */
6570 l2cap_chan_le_send_credits(chan);
6571
6572 return err;
6573 }
6574
/* Receive one LE/ECRED mode PDU: enforce credit-based flow control and
 * reassemble fragments into SDUs. The first PDU of an SDU carries a
 * 16-bit SDU length prefix.
 *
 * Returns 0 once this function has taken ownership of the skb (even on
 * internal reassembly errors - see the comment at the end), or -ENOBUFS
 * on the early credit/MTU violations, where the skb is left to the
 * caller.
 */
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	/* Peer sent data without holding a credit: protocol violation */
	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	/* Each received PDU consumes one of the credits we granted */
	chan->rx_credits--;
	BT_DBG("chan %p: rx_credits %u -> %u",
	       chan, chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First fragment carries the total SDU length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Unsegmented SDU: deliver immediately */
		if (skb->len == sdu_len)
			return l2cap_ecred_recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* skb now owned by chan->sdu */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		/* Drop this frame plus any partial SDU, reset state */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6679
/* Dispatch an inbound frame for a data channel identified by @cid.
 * Takes ownership of @skb: it is either consumed by the per-mode
 * receive path or freed here.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	/* Lookup returns the channel locked with a reference held;
	 * both are released at "done" below.
	 */
	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* A negative return means the skb was not consumed */
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* recv() returning 0 means it consumed the skb */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	/* Release the lock and reference taken by l2cap_get_chan_by_scid */
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
6745
/* Deliver a connectionless (G-frame) packet addressed to @psm to a
 * matching listening channel.  Consumes @skb on every path.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	/* Connectionless traffic only exists on BR/EDR ACL links */
	if (hcon->type != ACL_LINK)
		goto free_skb;

	/* The lookup takes a reference on the returned channel; it is
	 * dropped on every exit path below.
	 */
	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	l2cap_chan_lock(chan);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* recv() returning 0 means it consumed the skb */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6786
/* Entry point for a fully reassembled L2CAP frame: validate the basic
 * header and route the payload to the appropriate channel handler.
 * Ownership of @skb passes to the invoked handler (or it is queued or
 * freed here).
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	/* Defer processing until the link is fully established; the
	 * queue is drained by process_pending_rx().
	 */
	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* The header length field must match the actual payload length */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless payload starts with the PSM */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6841
process_pending_rx(struct work_struct * work)6842 static void process_pending_rx(struct work_struct *work)
6843 {
6844 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6845 pending_rx_work);
6846 struct sk_buff *skb;
6847
6848 BT_DBG("");
6849
6850 mutex_lock(&conn->lock);
6851
6852 while ((skb = skb_dequeue(&conn->pending_rx)))
6853 l2cap_recv_frame(conn, skb);
6854
6855 mutex_unlock(&conn->lock);
6856 }
6857
/* Return the l2cap_conn attached to @hcon, creating and initialising it
 * on first use.  Returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already set up: reuse the existing connection object */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);	/* conn holds a hci_conn ref */
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	conn->mtu = hcon->mtu;
	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* Only advertise the SMP fixed channel over BR/EDR when secure
	 * connections (or the force flag) make it usable.
	 */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_DELAYED_WORK(&conn->id_addr_timer, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
6909
is_valid_psm(u16 psm,u8 dst_type)6910 static bool is_valid_psm(u16 psm, u8 dst_type)
6911 {
6912 if (!psm)
6913 return false;
6914
6915 if (bdaddr_type_is_le(dst_type))
6916 return (psm <= 0x00ff);
6917
6918 /* PSM must be odd and lsb of upper byte must be 0 */
6919 return ((psm & 0x0101) == 0x0001);
6920 }
6921
/* Iteration context for l2cap_chan_by_pid(): counts deferred
 * EXT_FLOWCTL channels with the same owner PID and PSM as @chan.
 */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* channel that started the count */
	struct pid *pid;		/* owner PID to match against */
	int count;			/* number of matching channels */
};
6927
l2cap_chan_by_pid(struct l2cap_chan * chan,void * data)6928 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
6929 {
6930 struct l2cap_chan_data *d = data;
6931 struct pid *pid;
6932
6933 if (chan == d->chan)
6934 return;
6935
6936 if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
6937 return;
6938
6939 pid = chan->ops->get_peer_pid(chan);
6940
6941 /* Only count deferred channels with the same PID/PSM */
6942 if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
6943 chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
6944 return;
6945
6946 d->count++;
6947 }
6948
/* Initiate an outgoing connection for @chan towards @dst/@dst_type,
 * addressed by @psm (connection oriented channels) and/or @cid (fixed
 * channels), creating or reusing the underlying ACL/LE link.  Returns 0
 * on success (including when a connect attempt is already in progress)
 * or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type, u16 timeout)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* The PSM/CID combination must be coherent with the channel type */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Reject modes that are disabled via module parameters */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type, false,
					      chan->sec_level, timeout,
					      HCI_ROLE_SLAVE, 0, 0);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level, timeout,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN, timeout);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check if there isn't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->lock);
	l2cap_chan_lock(chan);

	/* Fixed channels: make sure the requested DCID is not in use */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* If the link is already up, start channel setup immediately */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
7132 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7133
l2cap_ecred_reconfigure(struct l2cap_chan * chan)7134 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
7135 {
7136 struct l2cap_conn *conn = chan->conn;
7137 DEFINE_RAW_FLEX(struct l2cap_ecred_reconf_req, pdu, scid, 1);
7138
7139 pdu->mtu = cpu_to_le16(chan->imtu);
7140 pdu->mps = cpu_to_le16(chan->mps);
7141 pdu->scid[0] = cpu_to_le16(chan->scid);
7142
7143 chan->ident = l2cap_get_ident(conn);
7144
7145 l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
7146 sizeof(pdu), &pdu);
7147 }
7148
/* Raise the incoming MTU of an enhanced credit based channel to @mtu
 * and notify the peer via a reconfigure request.  Returns -EINVAL when
 * the new MTU would shrink the current one.
 */
int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
{
	/* The MTU may only stay the same or grow */
	if (mtu < chan->imtu)
		return -EINVAL;

	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);

	chan->imtu = mtu;

	l2cap_ecred_reconfigure(chan);

	return 0;
}
7162
7163 /* ---- L2CAP interface with lower layer (HCI) ---- */
7164
/* HCI callback for an incoming BR/EDR connection: scan the listening
 * channels and build the link-mode mask.  Channels bound to this
 * adapter's own address take precedence over wildcard (BDADDR_ANY)
 * listeners.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct l2cap_chan *c;
	int exact_match = 0;
	int lm_exact = 0, lm_any = 0;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		int lm = HCI_LM_ACCEPT;

		if (c->state != BT_LISTEN)
			continue;

		if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
			lm |= HCI_LM_MASTER;

		if (!bacmp(&c->src, &hdev->bdaddr)) {
			lm_exact |= lm;
			exact_match++;
		} else if (!bacmp(&c->src, BDADDR_ANY)) {
			lm_any |= lm;
		}
	}
	read_unlock(&chan_list_lock);

	return exact_match ? lm_exact : lm_any;
}
7193
7194 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7195 * from an existing channel in the list or from the beginning of the
7196 * global list (by passing NULL as first parameter).
7197 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	/* Resume after the previous hit, or start from the list head */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Source must match the link's address or be a wildcard */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* Take a reference for the caller (result is NULL if the
		 * channel's refcount already dropped to zero).
		 */
		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7229
/* HCI callback: an ACL or LE link completed (or failed).  On success
 * attach an l2cap_conn and spawn channels for matching fixed-channel
 * listeners; on failure tear the connection down.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Advance before dropping the reference the lookup took */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7290
l2cap_disconn_ind(struct hci_conn * hcon)7291 int l2cap_disconn_ind(struct hci_conn *hcon)
7292 {
7293 struct l2cap_conn *conn = hcon->l2cap_data;
7294
7295 BT_DBG("hcon %p", hcon);
7296
7297 if (!conn)
7298 return HCI_ERROR_REMOTE_USER_TERM;
7299 return conn->disc_reason;
7300 }
7301
/* HCI callback: the link went down; tear down the L2CAP connection.
 * Only ACL and LE links carry L2CAP.
 */
static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	if (hcon->type == ACL_LINK || hcon->type == LE_LINK) {
		BT_DBG("hcon %p reason %d", hcon, reason);

		l2cap_conn_del(hcon, bt_to_errno(reason));
	}
}
7311
/* React to an encryption change on a connection oriented channel:
 * MEDIUM security arms/disarms the encryption timer, HIGH/FIPS close
 * the channel outright when encryption is lost.
 */
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return;

	switch (chan->sec_level) {
	case BT_SECURITY_MEDIUM:
		if (!encrypt)
			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
		else
			__clear_chan_timer(chan);
		break;
	case BT_SECURITY_HIGH:
	case BT_SECURITY_FIPS:
		if (!encrypt)
			l2cap_chan_close(chan, ECONNREFUSED);
		break;
	default:
		break;
	}
}
7328
/* HCI callback: authentication/encryption state of the link changed.
 * Walk all channels on the connection and advance or tear down their
 * state machines accordingly.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* Successful encryption raises the effective security */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Proceed only if security succeeded and the
			 * encryption key is large enough.
			 */
			if (!status && l2cap_check_enc_key_size(hcon, chan))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon, chan)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Kick off configuration right away on success */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
7415
7416 /* Append fragment into frame respecting the maximum len of rx_skb */
static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
			   u16 len)
{
	if (!conn->rx_skb) {
		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			return -ENOMEM;
		/* Init rx_len */
		conn->rx_len = len;

		/* Carry over the timestamp of the first fragment */
		skb_set_delivery_time(conn->rx_skb, skb->tstamp,
				      skb->tstamp_type);
	}

	/* Copy as much as the rx_skb can hold */
	len = min_t(u16, len, skb->len);
	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
	skb_pull(skb, len);
	conn->rx_len -= len;

	/* Number of bytes actually copied out of @skb */
	return len;
}
7440
/* Complete the 2-byte L2CAP length field from a continuation fragment
 * and make sure conn->rx_skb is large enough for the full frame,
 * reallocating it when the initial conn->mtu-sized guess was too small.
 * Returns the number of bytes consumed from @skb or a negative error.
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to received all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in order to
	 * fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}
7475
/* Drop any partially reassembled frame and reset reassembly state */
static void l2cap_recv_reset(struct l2cap_conn *conn)
{
	kfree_skb(conn->rx_skb);
	conn->rx_skb = NULL;
	conn->rx_len = 0;
}
7482
l2cap_conn_hold_unless_zero(struct l2cap_conn * c)7483 struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *c)
7484 {
7485 if (!c)
7486 return NULL;
7487
7488 BT_DBG("conn %p orig refcnt %u", c, kref_read(&c->ref));
7489
7490 if (!kref_get_unless_zero(&c->ref))
7491 return NULL;
7492
7493 return c;
7494 }
7495
/* HCI callback: one ACL data packet arrived for @hcon.  Reassembles
 * fragmented L2CAP frames according to the ACL packet boundary @flags
 * and feeds complete frames to l2cap_recv_frame().  Consumes @skb.
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn;
	int len;

	/* Lock hdev to access l2cap_data to avoid race with l2cap_conn_del */
	hci_dev_lock(hcon->hdev);

	conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	/* Hold the conn across processing; dropped at the end */
	conn = l2cap_conn_hold_unless_zero(conn);

	hci_dev_unlock(hcon->hdev);

	if (!conn) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);

	mutex_lock(&conn->lock);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start flag with reassembly in progress means the
		 * previous frame was never completed.
		 */
		if (conn->rx_skb) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment may not contain the L2CAP length so just
		 * copy the initial byte when that happens and use conn->mtu as
		 * expected length.
		 */
		if (skb->len < L2CAP_LEN_SIZE) {
			l2cap_recv_frag(conn, skb, conn->mtu);
			break;
		}

		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			goto unlock;
		}

		BT_DBG("Start: total len %d, frag len %u", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %u, expected len %d)",
			       skb->len, len);
			/* PTS test cases L2CAP/COS/CED/BI-14-C and BI-15-C
			 * (Multiple Signaling Command in one PDU, Data
			 * Truncated, BR/EDR) send a C-frame to the IUT with
			 * PDU Length set to 8 and Channel ID set to the
			 * correct signaling channel for the logical link.
			 * The Information payload contains one L2CAP_ECHO_REQ
			 * packet with Data Length set to 0 with 0 octets of
			 * echo data and one invalid command packet due to
			 * data truncated in PDU but present in HCI packet.
			 *
			 * Shorter the socket buffer to the PDU length to
			 * allow to process valid commands from the PDU before
			 * setting the socket unreliable.
			 */
			skb->len = len;
			l2cap_recv_frame(conn, skb);
			l2cap_conn_unreliable(conn, ECOMM);
			goto unlock;
		}

		/* Append fragment into frame (with header) */
		if (l2cap_recv_frag(conn, skb, len) < 0)
			goto drop;

		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);

		if (!conn->rx_skb) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Complete the L2CAP length if it has not been read */
		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
			if (l2cap_recv_len(conn, skb) < 0) {
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}

			/* Header still could not be read just continue */
			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
				break;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %u, expected %u)",
			       skb->len, conn->rx_len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		l2cap_recv_frag(conn, skb, skb->len);

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
unlock:
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
}
7630
/* Callbacks registered with the HCI core for link-level events */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
7637
l2cap_debugfs_show(struct seq_file * f,void * p)7638 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7639 {
7640 struct l2cap_chan *c;
7641
7642 read_lock(&chan_list_lock);
7643
7644 list_for_each_entry(c, &chan_list, global_l) {
7645 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7646 &c->src, c->src_type, &c->dst, c->dst_type,
7647 c->state, __le16_to_cpu(c->psm),
7648 c->scid, c->dcid, c->imtu, c->omtu,
7649 c->sec_level, c->mode);
7650 }
7651
7652 read_unlock(&chan_list_lock);
7653
7654 return 0;
7655 }
7656
7657 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
7658
7659 static struct dentry *l2cap_debugfs;
7660
l2cap_init(void)7661 int __init l2cap_init(void)
7662 {
7663 int err;
7664
7665 err = l2cap_init_sockets();
7666 if (err < 0)
7667 return err;
7668
7669 hci_register_cb(&l2cap_cb);
7670
7671 if (IS_ERR_OR_NULL(bt_debugfs))
7672 return 0;
7673
7674 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7675 NULL, &l2cap_debugfs_fops);
7676
7677 return 0;
7678 }
7679
/* Module exit: tear down in reverse order of l2cap_init() */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
7686
/* Runtime-tunable module parameters */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
7692