1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40
41 #include "smp.h"
42
43 #define LE_FLOWCTL_MAX_CREDITS 65535
44
45 bool disable_ertm;
46 bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED);
47
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49
50 static LIST_HEAD(chan_list);
51 static DEFINE_RWLOCK(chan_list_lock);
52
53 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
54 u8 code, u8 ident, u16 dlen, void *data);
55 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 void *data);
57 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
58 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59
60 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
61 struct sk_buff_head *skbs, u8 event);
62 static void l2cap_retrans_timeout(struct work_struct *work);
63 static void l2cap_monitor_timeout(struct work_struct *work);
64 static void l2cap_ack_timeout(struct work_struct *work);
65
/* Map an HCI link type / address type pair onto the socket-facing
 * BDADDR_* address space.
 */
static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
{
	if (link_type != LE_LINK)
		return BDADDR_BREDR;

	return bdaddr_type == ADDR_LE_DEV_PUBLIC ? BDADDR_LE_PUBLIC :
						   BDADDR_LE_RANDOM;
}
77
bdaddr_src_type(struct hci_conn * hcon)78 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
79 {
80 return bdaddr_type(hcon->type, hcon->src_type);
81 }
82
bdaddr_dst_type(struct hci_conn * hcon)83 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
84 {
85 return bdaddr_type(hcon->type, hcon->dst_type);
86 }
87
88 /* ---- L2CAP channels ---- */
89
/* Linear scan of the connection's channel list for a channel whose
 * destination CID matches @cid. Caller must serialize list access.
 */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->dcid != cid)
			continue;
		return chan;
	}

	return NULL;
}
101
/* Linear scan of the connection's channel list for a channel whose
 * source CID matches @cid. Caller must serialize list access.
 */
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->scid != cid)
			continue;
		return chan;
	}

	return NULL;
}
113
114 /* Find channel with given SCID.
115 * Returns a reference locked channel.
116 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *chan = __l2cap_get_chan_by_scid(conn, cid);

	if (!chan)
		return NULL;

	/* Skip channels whose refcount has already dropped to zero */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (chan)
		l2cap_chan_lock(chan);

	return chan;
}
132
133 /* Find channel with given DCID.
134 * Returns a reference locked channel.
135 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *chan = __l2cap_get_chan_by_dcid(conn, cid);

	if (!chan)
		return NULL;

	/* Skip channels whose refcount has already dropped to zero */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (chan)
		l2cap_chan_lock(chan);

	return chan;
}
151
/* Find the channel currently using signalling identifier @ident.
 * Caller must serialize list access.
 */
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->ident != ident)
			continue;
		return chan;
	}

	return NULL;
}
163
/* Find a registered channel bound to @psm on source address @src.
 * BR/EDR and LE channels live in disjoint PSM spaces, so only channels
 * of a matching transport are considered. Caller holds chan_list_lock.
 */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
						      u8 src_type)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		bool c_bredr = c->src_type == BDADDR_BREDR;

		/* Transports must agree before the PSM can match */
		if ((src_type == BDADDR_BREDR) != c_bredr)
			continue;

		if (c->sport == psm && !bacmp(&c->src, src))
			return c;
	}

	return NULL;
}
181
/* Bind @chan to @psm on source address @src, or, when @psm is zero,
 * auto-assign the first free dynamic PSM for the channel's transport.
 *
 * Returns 0 on success, -EADDRINUSE when the requested PSM is already
 * bound on @src, or -EINVAL when no dynamic PSM is available.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	/* chan_list_lock serializes the lookup and the assignment */
	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			/* BR/EDR dynamic PSMs must be odd; step by 2 */
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
225 EXPORT_SYMBOL_GPL(l2cap_add_psm);
226
/* Register a fixed channel with source CID @scid on @chan, overriding
 * the connection-oriented defaults. Always succeeds.
 */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	chan->chan_type = L2CAP_CHAN_FIXED;
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
241
l2cap_alloc_cid(struct l2cap_conn * conn)242 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
243 {
244 u16 cid, dyn_end;
245
246 if (conn->hcon->type == LE_LINK)
247 dyn_end = L2CAP_CID_LE_DYN_END;
248 else
249 dyn_end = L2CAP_CID_DYN_END;
250
251 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
252 if (!__l2cap_get_chan_by_scid(conn, cid))
253 return cid;
254 }
255
256 return 0;
257 }
258
/* Transition @chan to @state and notify the channel owner through the
 * state_change callback with no error.
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	/* Log old -> new before the state is overwritten below */
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
267
l2cap_state_change_and_error(struct l2cap_chan * chan,int state,int err)268 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
269 int state, int err)
270 {
271 chan->state = state;
272 chan->ops->state_change(chan, chan->state, err);
273 }
274
/* Report @err to the channel owner without changing the state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
279
/* (Re)arm the ERTM retransmission timer.
 *
 * The retransmission timer is not armed while the monitor timer is
 * pending — once a poll is outstanding, the monitor timer supervises
 * the exchange instead. A zero retrans_timeout disables the timer.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
288
__set_monitor_timer(struct l2cap_chan * chan)289 static void __set_monitor_timer(struct l2cap_chan *chan)
290 {
291 __clear_retrans_timer(chan);
292 if (chan->monitor_timeout) {
293 l2cap_set_timer(chan, &chan->monitor_timer,
294 msecs_to_jiffies(chan->monitor_timeout));
295 }
296 }
297
/* Return the skb in @head carrying ERTM TX sequence number @seq, or
 * NULL if no queued frame matches.
 */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->l2cap.txseq == seq)
			return skb;
	}

	return NULL;
}
310
311 /* ---- L2CAP sequence number lists ---- */
312
313 /* For ERTM, ordered lists of sequence numbers must be tracked for
314 * SREJ requests that are received and for frames that are to be
315 * retransmitted. These seq_list functions implement a singly-linked
316 * list in an array, where membership in the list can also be checked
317 * in constant time. Items can also be added to the tail of the list
318 * and removed from the head in constant time, without further memory
319 * allocs or frees.
320 */
321
/* Allocate the backing array for a sequence list able to hold at least
 * @size entries. Returns 0 on success or -ENOMEM.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t n, i;

	/* Round the array up to a power of two so sequence numbers
	 * (up to 14 bits) map into it with a simple mask.
	 */
	n = roundup_pow_of_two(size);

	seq_list->list = kmalloc_array(n, sizeof(u16), GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	seq_list->mask = n - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;

	for (i = 0; i < n; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
344
/* Free the array backing @seq_list (allocated in l2cap_seq_list_init). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
349
l2cap_seq_list_contains(struct l2cap_seq_list * seq_list,u16 seq)350 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
351 u16 seq)
352 {
353 /* Constant-time check for list membership */
354 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
355 }
356
/* Remove and return the sequence number at the head of the list.
 *
 * Must only be called on a non-empty list: the head slot is consumed
 * unconditionally. When the popped entry's slot held the
 * L2CAP_SEQ_LIST_TAIL marker it was the last entry, and the list is
 * reset to the empty state.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next linked entry, then clear this slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		/* Popped the last entry; mark the list empty */
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
372
/* Empty @seq_list: clear every slot and reset head/tail. O(1) when the
 * list is already empty.
 */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	/* head == CLEAR means the list is already empty */
	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
386
/* Append @seq to the tail of the list.
 *
 * A slot that is not CLEAR means @seq is already queued, so duplicates
 * are silently ignored. The tail entry's slot always holds the
 * L2CAP_SEQ_LIST_TAIL sentinel; the previous tail slot (if any) is
 * re-linked to point at @seq.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
404
/* Delayed-work handler for the channel timer (armed by
 * __set_chan_timer()).
 *
 * Closes the channel with an errno chosen from the state it timed out
 * in, then drops the reference __set_chan_timer() took when scheduling
 * this work.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* The channel may already be detached from any connection */
	if (!conn)
		return;

	mutex_lock(&conn->lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->lock);
}
440
/* Allocate and initialise a new channel object.
 *
 * The channel starts in BT_OPEN with a single reference (kref_init)
 * held by the caller, and is immediately visible on the global channel
 * list. Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc_obj(*chan, GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	/* Available receive buffer space is initially unknown */
	chan->rx_avail = -1;

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
479 EXPORT_SYMBOL_GPL(l2cap_chan_create);
480
/* kref release callback: unlink the channel from the global list and
 * free it. Invoked when the last reference is dropped via
 * l2cap_chan_put().
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
493
/* Take an additional reference on @c. The caller must already hold a
 * valid reference; use l2cap_chan_hold_unless_zero() otherwise.
 */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
500 EXPORT_SYMBOL_GPL(l2cap_chan_hold);
501
l2cap_chan_hold_unless_zero(struct l2cap_chan * c)502 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
503 {
504 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
505
506 if (!kref_get_unless_zero(&c->kref))
507 return NULL;
508
509 return c;
510 }
511
/* Drop a reference on @c; the channel is freed via l2cap_chan_destroy()
 * when this was the last one.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
518 EXPORT_SYMBOL_GPL(l2cap_chan_put);
519
/* Reset @chan to the default ERTM/flow parameters and security level. */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	/* Until told otherwise, mirror the local values for the remote */
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

	chan->conf_state = 0;
	/* Cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
539 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
540
l2cap_le_rx_credits(struct l2cap_chan * chan)541 static __u16 l2cap_le_rx_credits(struct l2cap_chan *chan)
542 {
543 size_t sdu_len = chan->sdu ? chan->sdu->len : 0;
544
545 if (chan->mps == 0)
546 return 0;
547
548 /* If we don't know the available space in the receiver buffer, give
549 * enough credits for a full packet.
550 */
551 if (chan->rx_avail == -1)
552 return (chan->imtu / chan->mps) + 1;
553
554 /* If we know how much space is available in the receive buffer, give
555 * out as many credits as would fill the buffer.
556 */
557 if (chan->rx_avail <= sdu_len)
558 return 0;
559
560 return DIV_ROUND_UP(chan->rx_avail - sdu_len, chan->mps);
561 }
562
/* Initialise LE credit-based flow-control state on @chan. */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	skb_queue_head_init(&chan->tx_q);

	/* Reset any in-progress SDU reassembly */
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	chan->tx_credits = tx_credits;
	/* Derive MPS from the connection MTU to stop HCI fragmentation;
	 * rx_credits depends on mps, so it is computed last.
	 */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	chan->rx_credits = l2cap_le_rx_credits(chan);
}
575
static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
{
	l2cap_le_flowctl_init(chan, tx_credits);

	/* L2CAP implementations shall support a minimum MPS of 64 octets.
	 * Raising the MPS changes the credit calculation, so redo it.
	 */
	if (chan->mps >= L2CAP_ECRED_MIN_MPS)
		return;

	chan->mps = L2CAP_ECRED_MIN_MPS;
	chan->rx_credits = l2cap_le_rx_credits(chan);
}
586
/* Attach @chan to @conn: assign CIDs according to the channel type,
 * seed the default extended flow specification, take the channel (and
 * usually hcon) references, and link the channel into the connection's
 * channel list. Caller must hold the connection lock.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default best-effort extended flow specification */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference dropped in l2cap_chan_del() */
	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	/* Append to the list since the order matters for ECRED */
	list_add_tail(&chan->list, &conn->chan_l);
}
639
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->lock);
}
646
/* Detach @chan from its connection (if any) and tear it down.
 *
 * Stops the channel timer, notifies the owner via ->teardown with
 * @err, unlinks the channel from the connection list, drops the
 * references taken in __l2cap_chan_add() and, for fully configured
 * channels, releases mode-specific queues and sequence lists.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference taken in __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);
	}

	/* Mode-specific state below only exists once config completed */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
703 EXPORT_SYMBOL_GPL(l2cap_chan_del);
704
/* Invoke @func on every channel of @conn whose ident matches @id.
 * Uses the _safe iterator because @func may unlink the channel.
 */
static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
				 l2cap_chan_func_t func, void *data)
{
	struct l2cap_chan *chan, *tmp;

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list)
		if (chan->ident == id)
			func(chan, data);
}
715
/* Invoke @func on every channel of @conn. Caller holds conn->lock. */
static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
			      void *data)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list)
		func(chan, data);
}
725
/* Invoke @func on every channel of @conn under the connection lock.
 * A NULL @conn is tolerated and treated as an empty list.
 */
void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
		     void *data)
{
	if (!conn)
		return;

	mutex_lock(&conn->lock);
	__l2cap_chan_list(conn, func, data);
	mutex_unlock(&conn->lock);
}
736
737 EXPORT_SYMBOL_GPL(l2cap_chan_list);
738
/* Worker for id_addr_timer: propagate the hcon's current destination
 * address and address type to every channel on the connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_timer.work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
757
l2cap_chan_le_connect_reject(struct l2cap_chan * chan)758 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
759 {
760 struct l2cap_conn *conn = chan->conn;
761 struct l2cap_le_conn_rsp rsp;
762 u16 result;
763
764 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
765 result = L2CAP_CR_LE_AUTHORIZATION;
766 else
767 result = L2CAP_CR_LE_BAD_PSM;
768
769 l2cap_state_change(chan, BT_DISCONN);
770
771 rsp.dcid = cpu_to_le16(chan->scid);
772 rsp.mtu = cpu_to_le16(chan->imtu);
773 rsp.mps = cpu_to_le16(chan->mps);
774 rsp.credits = cpu_to_le16(chan->rx_credits);
775 rsp.result = cpu_to_le16(result);
776
777 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
778 &rsp);
779 }
780
l2cap_chan_ecred_connect_reject(struct l2cap_chan * chan)781 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
782 {
783 l2cap_state_change(chan, BT_DISCONN);
784
785 __l2cap_ecred_conn_rsp_defer(chan);
786 }
787
l2cap_chan_connect_reject(struct l2cap_chan * chan)788 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
789 {
790 struct l2cap_conn *conn = chan->conn;
791 struct l2cap_conn_rsp rsp;
792 u16 result;
793
794 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
795 result = L2CAP_CR_SEC_BLOCK;
796 else
797 result = L2CAP_CR_BAD_PSM;
798
799 l2cap_state_change(chan, BT_DISCONN);
800
801 rsp.scid = cpu_to_le16(chan->dcid);
802 rsp.dcid = cpu_to_le16(chan->scid);
803 rsp.result = cpu_to_le16(result);
804 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
805
806 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
807 }
808
/* Close @chan with @reason, taking the action appropriate for its
 * current state: tear down listeners, request disconnection of
 * established connection-oriented channels, reject pending incoming
 * connections, or simply delete the channel.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Arm the channel timer before asking the peer
			 * to disconnect.
			 */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					/* ECRED reject defers the response
					 * and skips the chan_del below.
					 */
					return;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
858 EXPORT_SYMBOL(l2cap_chan_close);
859
/* Map the channel type, PSM and requested security level onto the HCI
 * authentication requirement for the link.
 *
 * Note: as a side effect, SDP-class PSMs (SDP itself, 3DSP) have their
 * sec_level lifted from BT_SECURITY_LOW to BT_SECURITY_SDP.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		/* Raw channels use dedicated bonding */
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* Non-SDP connection-oriented channels fall through to
		 * the general-bonding rules below.
		 */
		fallthrough;

	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
911
912 /* Service level security */
/* Request the service-level security the channel needs on its link. */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;

	/* LE links delegate security to SMP */
	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	return hci_conn_security(conn->hcon, chan->sec_level,
				 l2cap_get_auth_type(chan), initiator);
}
926
l2cap_get_ident(struct l2cap_conn * conn)927 static int l2cap_get_ident(struct l2cap_conn *conn)
928 {
929 /* LE link does not support tools like l2ping so use the full range */
930 if (conn->hcon->type == LE_LINK)
931 return ida_alloc_range(&conn->tx_ida, 1, 255, GFP_ATOMIC);
932
933 /* Get next available identificator.
934 * 1 - 128 are used by kernel.
935 * 129 - 199 are reserved.
936 * 200 - 254 are used by utilities like l2ping, etc.
937 */
938 return ida_alloc_range(&conn->tx_ida, 1, 128, GFP_ATOMIC);
939 }
940
static void l2cap_send_acl(struct l2cap_conn *conn, struct sk_buff *skb,
			   u8 flags)
{
	/* The hcon may already be gone; drop the skb in that case */
	if (!hci_conn_valid(conn->hcon->hdev, conn->hcon)) {
		kfree_skb(skb);
		return;
	}

	hci_send_acl(conn->hchan, skb, flags);
}
950
/* Build and transmit a signalling command on @conn.
 *
 * Signalling PDUs are sent at maximum HCI priority and forced active,
 * and are marked non-flushable when the controller supports it.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	/* l2cap_build_cmd() returns NULL on allocation failure */
	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	l2cap_send_acl(conn, skb, flags);
}
975
/* Hand a fully built data frame for @chan down to the HCI layer. */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
998
/* Decode a 16-bit ERTM enhanced control field into @control. */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	bool sframe = enh & L2CAP_CTRL_FRAME_TYPE;

	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
	control->sframe = sframe;

	if (sframe) {
		/* S-frame: supervisory, no SAR or TX sequence number */
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-frame: data, no poll or supervisory function */
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
		control->poll = 0;
		control->super = 0;
	}
}
1022
/* Decode a 32-bit ERTM extended control field into @control. */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	bool sframe = ext & L2CAP_EXT_CTRL_FRAME_TYPE;

	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
	control->sframe = sframe;

	if (sframe) {
		/* S-frame: supervisory, no SAR or TX sequence number */
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-frame: data, no poll or supervisory function */
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
		control->poll = 0;
		control->super = 0;
	}
}
1046
__unpack_control(struct l2cap_chan * chan,struct sk_buff * skb)1047 static inline void __unpack_control(struct l2cap_chan *chan,
1048 struct sk_buff *skb)
1049 {
1050 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1051 __unpack_extended_control(get_unaligned_le32(skb->data),
1052 &bt_cb(skb)->l2cap);
1053 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1054 } else {
1055 __unpack_enhanced_control(get_unaligned_le16(skb->data),
1056 &bt_cb(skb)->l2cap);
1057 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1058 }
1059 }
1060
__pack_extended_control(struct l2cap_ctrl * control)1061 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1062 {
1063 u32 packed;
1064
1065 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1066 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1067
1068 if (control->sframe) {
1069 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1070 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1071 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1072 } else {
1073 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1074 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1075 }
1076
1077 return packed;
1078 }
1079
__pack_enhanced_control(struct l2cap_ctrl * control)1080 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1081 {
1082 u16 packed;
1083
1084 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1085 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1086
1087 if (control->sframe) {
1088 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1089 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1090 packed |= L2CAP_CTRL_FRAME_TYPE;
1091 } else {
1092 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1093 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1094 }
1095
1096 return packed;
1097 }
1098
__pack_control(struct l2cap_chan * chan,struct l2cap_ctrl * control,struct sk_buff * skb)1099 static inline void __pack_control(struct l2cap_chan *chan,
1100 struct l2cap_ctrl *control,
1101 struct sk_buff *skb)
1102 {
1103 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1104 put_unaligned_le32(__pack_extended_control(control),
1105 skb->data + L2CAP_HDR_SIZE);
1106 } else {
1107 put_unaligned_le16(__pack_enhanced_control(control),
1108 skb->data + L2CAP_HDR_SIZE);
1109 }
1110 }
1111
__ertm_hdr_size(struct l2cap_chan * chan)1112 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1113 {
1114 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1115 return L2CAP_EXT_HDR_SIZE;
1116 else
1117 return L2CAP_ENH_HDR_SIZE;
1118 }
1119
/* Allocate and build a complete S-frame PDU for @chan: basic L2CAP
 * header, the packed @control field and an optional FCS.
 *
 * Returns the new skb or ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	/* S-frames carry no payload, so the FCS (if enabled) follows the
	 * control field directly.
	 */
	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	/* PDU length excludes the basic L2CAP header itself */
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	/* Control field width depends on the negotiated mode */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* CRC covers everything assembled so far */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	/* Supervisory frames go out at highest priority */
	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1152
/* Build and transmit one S-frame described by @control, updating the
 * per-channel bookkeeping that supervisory frames imply: pending F-bit,
 * RNR-sent state, last acknowledged sequence and the ack timer.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A pending F-bit rides on this frame unless it is itself a poll */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	/* Track whether we last told the peer we are busy (RNR) or not (RR) */
	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* SREJ does not acknowledge frames; every other S-frame acks up to
	 * reqseq, so the ack timer can be stopped.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1190
/* Send an RR S-frame (or RNR when the local side is busy) acknowledging
 * the current buffer_seq, optionally with the P-bit set.
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control = {
		.sframe = 1,
		.poll = poll,
	};

	BT_DBG("chan %p, poll %d", chan, poll);

	control.super = test_bit(CONN_LOCAL_BUSY, &chan->conn_state) ?
			L2CAP_SUPER_RNR : L2CAP_SUPER_RR;
	control.reqseq = chan->buffer_seq;

	l2cap_send_sframe(chan, &control);
}
1209
__l2cap_no_conn_pending(struct l2cap_chan * chan)1210 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1211 {
1212 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1213 return true;
1214
1215 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1216 }
1217
l2cap_send_conn_req(struct l2cap_chan * chan)1218 void l2cap_send_conn_req(struct l2cap_chan *chan)
1219 {
1220 struct l2cap_conn *conn = chan->conn;
1221 struct l2cap_conn_req req;
1222
1223 req.scid = cpu_to_le16(chan->scid);
1224 req.psm = chan->psm;
1225
1226 chan->ident = l2cap_get_ident(conn);
1227
1228 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1229
1230 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1231 }
1232
/* Transition @chan to BT_CONNECTED and notify its owner, clearing all
 * configuration state and the channel timer first.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* No credits to transmit with yet: keep the owner from
		 * sending until credits arrive.
		 */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);
		break;
	}

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1258
/* Send an LE credit-based Connection Request for @chan, initialising
 * LE flow control state first. Idempotent: a second call is a no-op
 * once FLAG_LE_CONN_REQ_SENT is set.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Fall back to the connection MTU if none was configured */
	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	l2cap_le_flowctl_init(chan, 0);

	memset(&req, 0, sizeof(req));
	req.psm = chan->psm;
	req.scid = cpu_to_le16(chan->scid);
	req.mtu = cpu_to_le16(chan->imtu);
	req.mps = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1284
/* Scratch state used while aggregating deferred channels into a single
 * Enhanced Credit Based (ECRED) connection request.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req_hdr req;
		__le16 scid[5];		/* up to 5 source CIDs per request */
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel that initiated the request */
	struct pid *pid;		/* peer PID deferred channels must share */
	int count;			/* number of scid[] entries filled in */
};
1294
/* Channel-list iterator callback: fold a deferred channel into the
 * pending ECRED connection request being built in @data, provided it
 * matches the initiating channel's PID and PSM and is still waiting to
 * connect.
 */
static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_conn_data *conn = data;
	struct pid *pid;

	/* Skip the initiating channel itself */
	if (chan == conn->chan)
		return;

	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	pid = chan->ops->get_peer_pid(chan);

	/* Only add deferred channels with the same PID/PSM */
	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	/* Set the same ident so we can match on the rsp */
	chan->ident = conn->chan->ident;

	/* Include all channels deferred */
	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);

	conn->count++;
}
1326
/* Send an Enhanced Credit Based Connection Request for @chan, bundling
 * in any other deferred channels with the same PID/PSM (the ECRED
 * request can carry up to 5 source CIDs).
 */
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_conn_data data;

	/* Deferred channels are picked up by another initiator's request */
	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	/* Idempotent: request already on the wire */
	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	memset(&data, 0, sizeof(data));
	data.pdu.req.psm = chan->psm;
	data.pdu.req.mtu = cpu_to_le16(chan->imtu);
	data.pdu.req.mps = cpu_to_le16(chan->mps);
	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.scid[0] = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	data.count = 1;
	data.chan = chan;
	data.pid = chan->ops->get_peer_pid(chan);

	/* Collect matching deferred channels into scid[1..] */
	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);

	/* Command length covers the header plus the CIDs actually used */
	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
		       &data.pdu);
}
1359
l2cap_le_start(struct l2cap_chan * chan)1360 static void l2cap_le_start(struct l2cap_chan *chan)
1361 {
1362 struct l2cap_conn *conn = chan->conn;
1363
1364 if (!smp_conn_security(conn->hcon, chan->sec_level))
1365 return;
1366
1367 if (!chan->psm) {
1368 l2cap_chan_ready(chan);
1369 return;
1370 }
1371
1372 if (chan->state == BT_CONNECT) {
1373 if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1374 l2cap_ecred_connect(chan);
1375 else
1376 l2cap_le_connect(chan);
1377 }
1378 }
1379
l2cap_start_connection(struct l2cap_chan * chan)1380 static void l2cap_start_connection(struct l2cap_chan *chan)
1381 {
1382 if (chan->conn->hcon->type == LE_LINK) {
1383 l2cap_le_start(chan);
1384 } else {
1385 l2cap_send_conn_req(chan);
1386 }
1387 }
1388
/* Start the information request procedure on @conn by asking the peer
 * for its feature mask. No-op if a request was already sent; arms the
 * info timer so the procedure completes even without a response.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	/* Record state and ident before the command goes out so the
	 * response handler can match them.
	 */
	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1406
l2cap_check_enc_key_size(struct hci_conn * hcon,struct l2cap_chan * chan)1407 static bool l2cap_check_enc_key_size(struct hci_conn *hcon,
1408 struct l2cap_chan *chan)
1409 {
1410 /* The minimum encryption key size needs to be enforced by the
1411 * host stack before establishing any L2CAP connections. The
1412 * specification in theory allows a minimum of 1, but to align
1413 * BR/EDR and LE transports, a minimum of 7 is chosen.
1414 *
1415 * This check might also be called for unencrypted connections
1416 * that have no key size requirements. Ensure that the link is
1417 * actually encrypted before enforcing a key size.
1418 */
1419 int min_key_size = hcon->hdev->min_enc_key_size;
1420
1421 /* On FIPS security level, key size must be 16 bytes */
1422 if (chan->sec_level == BT_SECURITY_FIPS)
1423 min_key_size = 16;
1424
1425 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1426 hcon->enc_key_size >= min_key_size);
1427 }
1428
/* Drive connection setup for @chan. On LE the LE path handles
 * everything; on BR/EDR the information request procedure must have
 * completed and security must pass before the Connection Request is
 * sent.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* Info procedure not started yet: start it, setup continues when
	 * the response (or timeout) arrives.
	 */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	/* Too-short encryption key: arm the disconnect timer instead of
	 * proceeding with an insecure connection.
	 */
	if (l2cap_check_enc_key_size(conn->hcon, chan))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
1455
/* Non-zero when @mode (ERTM or streaming) is supported by both the
 * local feature mask and the remote @feat_mask; 0 for any other mode.
 */
static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
{
	u32 local_feat_mask = l2cap_feat_mask;
	u32 mode_bit;

	if (!disable_ertm)
		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;

	switch (mode) {
	case L2CAP_MODE_ERTM:
		mode_bit = L2CAP_FEAT_ERTM;
		break;
	case L2CAP_MODE_STREAMING:
		mode_bit = L2CAP_FEAT_STREAMING;
		break;
	default:
		return 0x00;
	}

	return mode_bit & feat_mask & local_feat_mask;
}
1471
/* Send a Disconnection Request for @chan and move it to BT_DISCONN with
 * @err recorded, stopping any ERTM timers first.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* ERTM timers are meaningless once disconnection is under way */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1493
1494 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and advance its connection setup:
 * connectionless channels become ready immediately, outgoing channels
 * (BT_CONNECT) get their Connection Request issued, and incoming
 * channels (BT_CONNECT2) get their pending Connection Response (and,
 * on success, the first Configure Request) sent.
 *
 * Runs once the information request/response exchange has finished.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	/* _safe variant: channels may be closed (unlinked) while iterating */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close state-2 devices whose configured mode the
			 * remote does not support.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon, chan))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Wait for the owner to accept */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				/* Security procedure still in progress */
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only a successful response moves on to config,
			 * and only if no Configure Request is out yet.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}
}
1570
/* Post-connect handling specific to LE links: kick off pairing for
 * outgoing connections and, as peripheral, request a connection
 * parameter update if the current interval is out of range.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1603
/* Called when the underlying link comes up: start the info procedure on
 * ACL links, advance setup on every existing channel, run LE-specific
 * post-connect work, and release any RX frames queued while the
 * connection was being established.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	/* Channel list is protected by the connection lock */
	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed channels become ready once the feature
			 * mask exchange has finished.
			 */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process frames that arrived before the connection was ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1639
1640 /* Notify sockets that we cannot guaranty reliability anymore */
l2cap_conn_unreliable(struct l2cap_conn * conn,int err)1641 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1642 {
1643 struct l2cap_chan *chan;
1644
1645 BT_DBG("conn %p", conn);
1646
1647 list_for_each_entry(chan, &conn->chan_l, list) {
1648 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1649 l2cap_chan_set_err(chan, err);
1650 }
1651 }
1652
/* Info-timer work item: the peer never answered the information
 * request, so mark the procedure done with whatever we have and let
 * pending channel setups proceed.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	/* l2cap_conn_start() expects the connection lock to be held */
	mutex_lock(&conn->lock);
	l2cap_conn_start(conn);
	mutex_unlock(&conn->lock);
}
1665
1666 /*
1667 * l2cap_user
1668 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1669 * callback is called during registration. The ->remove callback is called
1670 * during unregistration.
1671 * An l2cap_user object can either be explicitly unregistered or when the
1672 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1673 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1674 * External modules must own a reference to the l2cap_conn object if they intend
1675 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1676 * any time if they don't.
1677 */
1678
/* Register an l2cap_user on @conn, invoking its ->probe callback.
 *
 * Returns 0 on success, -EINVAL if @user is already registered, -ENODEV
 * if the connection has already been torn down, or the ->probe error.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not,
	 * we must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead,
	 * it relies on the parent hci_conn object to be locked. This itself
	 * relies on the hci_dev object to be locked. So we must lock the
	 * hci device here, too. */

	hci_dev_lock(hdev);

	/* A non-empty list node means the user is already registered */
	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
1715 EXPORT_SYMBOL(l2cap_register_user);
1716
/* Unregister @user from @conn, invoking its ->remove callback. Safe to
 * call on a user that was never (or already un-) registered.
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	/* Same locking scheme as l2cap_register_user(): the hci_dev lock
	 * serialises against l2cap_conn_del().
	 */
	hci_dev_lock(hdev);

	/* Empty list node: not currently registered, nothing to do */
	if (list_empty(&user->list))
		goto out_unlock;

	list_del_init(&user->list);
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
1732 EXPORT_SYMBOL(l2cap_unregister_user);
1733
l2cap_unregister_all_users(struct l2cap_conn * conn)1734 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1735 {
1736 struct l2cap_user *user;
1737
1738 while (!list_empty(&conn->users)) {
1739 user = list_first_entry(&conn->users, struct l2cap_user, list);
1740 list_del_init(&user->list);
1741 user->remove(conn, user);
1742 }
1743 }
1744
/* Tear down the L2CAP connection attached to @hcon: stop pending work,
 * close every channel with @err, detach from the HCI channel and drop
 * the connection reference. No-op if no l2cap_conn is attached.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	mutex_lock(&conn->lock);

	/* Drop any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	ida_destroy(&conn->tx_ida);

	cancel_delayed_work_sync(&conn->id_addr_timer);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	/* Kill channels: hold each channel across del/close so the ops
	 * callback cannot see a freed channel.
	 */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hci_chan_del(conn->hchan);
	/* NULL hchan marks the connection as unregistered for
	 * l2cap_register_user().
	 */
	conn->hchan = NULL;

	hcon->l2cap_data = NULL;
	mutex_unlock(&conn->lock);
	/* May free conn if this was the last reference */
	l2cap_conn_put(conn);
}
1800
/* kref release callback: drop the hci_conn reference the l2cap_conn
 * holds and free the connection object itself.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1808
/* Take a reference on @conn; returns @conn for call-chaining. */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
1814 EXPORT_SYMBOL(l2cap_conn_get);
1815
/* Drop a reference on @conn; frees it via l2cap_conn_free() when the
 * last reference goes away.
 */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
1820 EXPORT_SYMBOL(l2cap_conn_put);
1821
1822 /* ---- Socket interface ---- */
1823
1824 /* Find socket with psm and source / destination bdaddr.
1825 * Returns closest match.
1826 */
/* Find a global channel matching @psm, @src/@dst addresses and
 * @link_type. An exact address match wins; otherwise the closest
 * wildcard (BDADDR_ANY) match is used. The returned channel carries a
 * reference taken with l2cap_chan_hold_unless_zero(); may return NULL.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		/* state == 0 means "any state" */
		if (state && c->state != state)
			continue;

		/* Source address type must match the link transport */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				/* Skip channels already being freed */
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1877
/* Monitor-timer work item: feed a monitor-timeout event into the ERTM
 * transmit state machine. The timer holds a channel reference which is
 * dropped here on every exit path.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel already detached from its connection: nothing to do */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1898
/* Retransmission-timer work item: feed a retransmit-timeout event into
 * the ERTM transmit state machine. The timer holds a channel reference
 * which is dropped here on every exit path.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel already detached from its connection: nothing to do */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1918
/* Transmit @skbs on a streaming-mode channel: number each I-frame with
 * the next TX sequence, append the FCS if enabled and send immediately.
 * Streaming mode has no retransmission, so frames leave the queue for
 * good.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Streaming frames never acknowledge anything */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1954
/* Transmit as many queued I-frames as the ERTM transmit window allows.
 *
 * Each frame is sequenced, piggybacks an acknowledgment (reqseq), may
 * carry a pending F-bit, gets an FCS if enabled, and is sent as a clone
 * so the original stays queued for possible retransmission.
 *
 * Returns the number of frames sent, 0 when the remote is busy, or
 * -ENOTCONN if the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	/* Stop when the queue is drained, the window is full, or the TX
	 * state machine left the XMIT state.
	 */
	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Piggyback an ack for everything received so far */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; NULL means the queue is drained */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2021
/* Retransmit every frame whose sequence number is on the channel's
 * retrans_list: look each frame up in the unacked tx queue, refresh its
 * control field (ack, F-bit) and FCS, and send a writable copy.
 *
 * Disconnects the channel if a frame exceeds max_tx retransmissions.
 * Does nothing while the remote side reports busy.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			/* Frame already acked and freed; nothing to resend */
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		/* Work on a copy of the stored control info */
		control = bt_cb(skb)->l2cap;

		/* max_tx == 0 means unlimited retransmissions */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the piggybacked ack and pending F-bit */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
					   L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2099
/* Queue the single frame identified by @control->reqseq for
 * retransmission and run the resend machinery.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2108
/* Queue every unacked frame from @control->reqseq onward for
 * retransmission (REJ/poll recovery) and run the resend machinery.
 * A poll request also schedules an F-bit on the next outgoing frame.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Rebuild the retransmission list from scratch */
	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame to retransmit: reqseq, or the send
		 * head if reqseq is not in the unacked range.
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything from there up to (excluding) the send
		 * head; later frames have not been transmitted yet.
		 */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2142
/* Decide how to acknowledge received I-frames: send an RNR while locally
 * busy, piggy-back acks on outgoing I-frames when possible, send an
 * explicit RR once roughly 3/4 of the window is unacked, or defer to the
 * ack timer otherwise.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Locally busy: a Receiver Not Ready S-frame both throttles
		 * the peer and acks everything up to buffer_seq.
		 */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Otherwise let the ack timer batch the acknowledgement */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2192
/* Copy user data from the message iterator into skb, allocating
 * continuation fragments (chained on frag_list, carrying no L2CAP header)
 * whenever the payload exceeds what fits in a single HCI MTU.
 *
 * Returns the number of bytes consumed or a negative errno.  On failure
 * the partially-filled skb (including any linked fragments) is left for
 * the caller to free.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	/* First chunk goes straight into the head skb */
	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link before copying so a failed copy still leaves the
		 * fragment owned by (and freed along with) the head skb.
		 */
		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
					 &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len -= count;

		/* Keep the head skb's length accounting in sync */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2236
/* Build a connectionless PDU: basic L2CAP header plus a 2-byte PSM,
 * followed by the user payload pulled from msg.  Returns the skb or an
 * ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* Payload that fits in the first fragment next to the headers */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	/* PSM follows the basic header; chan->psm is already __le16 */
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2268
/* Build a basic-mode (B-frame) PDU carrying the whole SDU: a plain
 * L2CAP header followed by the user payload from msg.  Returns the skb
 * or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_hdr *hdr;
	struct sk_buff *skb;
	int first_frag_len;
	int err;

	BT_DBG("chan %p len %zu", chan, len);

	/* Payload that fits in the first fragment beside the header */
	first_frag_len = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE),
			       len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, first_frag_len,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Basic L2CAP header: destination CID and payload length */
	hdr = skb_put(skb, L2CAP_HDR_SIZE);
	hdr->cid = cpu_to_le16(chan->dcid);
	hdr->len = cpu_to_le16(len);

	/* Pull the user data (plus continuation fragments) into the skb */
	err = l2cap_skbuff_fromiovec(chan, msg, len, first_frag_len, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2298
/* Build one ERTM/streaming I-frame PDU.  The control field is reserved
 * (zeroed) here and filled in by the transmit path; an SDU length field
 * is prepended for the first (SAR start) segment when sdulen != 0, and
 * room for the FCS is accounted for when CRC16 is in use.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Enhanced or extended control field, depending on channel flags */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* lh->len covers control/SDU-len/FCS overhead plus the payload */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2352
/* Segment an outgoing SDU into ERTM/streaming I-frame PDUs, tagging each
 * with the appropriate SAR value (unsegmented, start, continue, end) and
 * queueing them on seg_queue.  Returns 0 or a negative errno (on error
 * seg_queue is purged).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no SDU length field needed */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First segment carries the total SDU length */
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the first segment carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2418
/* Build one LE credit-based flow control PDU: basic L2CAP header, an
 * optional SDU length field (first segment only), then the payload.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* First segment of a multi-PDU SDU carries the total SDU length */
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2461
/* Segment an SDU for LE credit-based flow control.  The first PDU loses
 * L2CAP_SDULEN_SIZE bytes of payload to the SDU length field; subsequent
 * PDUs use the full remote MPS.  Returns 0 or a negative errno (on error
 * seg_queue is purged).
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	/* First PDU payload is reduced by the SDU length field */
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* After the first segment: no SDU length field, so
			 * the payload budget grows back by its size.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2497
l2cap_le_flowctl_send(struct l2cap_chan * chan)2498 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2499 {
2500 int sent = 0;
2501
2502 BT_DBG("chan %p", chan);
2503
2504 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2505 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2506 chan->tx_credits--;
2507 sent++;
2508 }
2509
2510 BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2511 skb_queue_len(&chan->tx_q));
2512 }
2513
/* Attach TX timestamp bookkeeping to an outgoing skb.  Stream sockets
 * account the full byte length; all other socket types count a single
 * unit per packet.
 */
static void l2cap_tx_timestamp(struct sk_buff *skb,
			       const struct sockcm_cookie *sockc,
			       size_t len)
{
	struct sock *sk = skb ? skb->sk : NULL;
	bool stream = sk && sk->sk_type == SOCK_STREAM;

	hci_setup_tx_timestamp(skb, stream ? len : 1, sockc);
}
2525
/* Timestamp a segmented SDU.  For stream sockets the mark goes on the
 * final segment (completion of the whole SDU); otherwise on the first.
 */
static void l2cap_tx_timestamp_seg(struct sk_buff_head *queue,
				   const struct sockcm_cookie *sockc,
				   size_t len)
{
	struct sk_buff *first = skb_peek(queue);
	struct sock *sk = first ? first->sk : NULL;

	if (sk && sk->sk_type == SOCK_STREAM)
		l2cap_tx_timestamp(skb_peek_tail(queue), sockc, len);
	else
		l2cap_tx_timestamp(first, sockc, len);
}
2538
/* Send an SDU on a channel, dispatching on channel type and mode:
 * connectionless, LE/extended credit-based flow control, basic, or
 * ERTM/streaming.  Returns the number of bytes accepted or a negative
 * errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    const struct sockcm_cookie *sockc)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_tx_timestamp(skb, sockc, len);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have gone down while segmenting (the
		 * allocation above can block); drop everything if so.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		l2cap_tx_timestamp_seg(&seg_queue, sockc, len);

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: stop the caller from queueing more */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_tx_timestamp(skb, sockc, len);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM) {
			/* TODO: ERTM mode timestamping */
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		} else {
			l2cap_tx_timestamp_seg(&seg_queue, sockc, len);
			l2cap_streaming_send(chan, &seg_queue);
		}

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2652
/* Send an SREJ S-frame for every sequence number between the expected
 * one and the (out-of-order) txseq just received, skipping frames that
 * are already buffered in srej_q, and record each request in srej_list.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Only request frames we do not already hold */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2675
l2cap_send_srej_tail(struct l2cap_chan * chan)2676 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2677 {
2678 struct l2cap_ctrl control;
2679
2680 BT_DBG("chan %p", chan);
2681
2682 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2683 return;
2684
2685 memset(&control, 0, sizeof(control));
2686 control.sframe = 1;
2687 control.super = L2CAP_SUPER_SREJ;
2688 control.reqseq = chan->srej_list.tail;
2689 l2cap_send_sframe(chan, &control);
2690 }
2691
/* Resend SREJs for all still-missing frames up to (but not including)
 * txseq.  Entries are popped and re-appended, so the initial head is
 * captured to guarantee exactly one pass over the list.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		/* Stop at the frame that just arrived, or on empty list */
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Still missing: keep it on the list for the next round */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2717
/* Process an acknowledgement (reqseq) from the peer: free every acked
 * frame from the TX queue, advance expected_ack_seq, and stop the
 * retransmission timer once nothing remains unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or nothing newly acked */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	/* All caught up: no retransmission pending */
	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2749
l2cap_abort_rx_srej_sent(struct l2cap_chan * chan)2750 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2751 {
2752 BT_DBG("chan %p", chan);
2753
2754 chan->expected_tx_seq = chan->buffer_seq;
2755 l2cap_seq_list_clear(&chan->srej_list);
2756 skb_queue_purge(&chan->srej_q);
2757 chan->rx_state = L2CAP_RX_STATE_RECV;
2758 }
2759
/* ERTM transmit state machine: handler for the XMIT state (normal
 * transmission).  Events that start a poll move the channel to WAIT_F.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* New SDU segments from the caller: queue and transmit */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Sends RNR while locally busy */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the peer to stop; poll it (RR with P=1)
			 * and wait for the F-bit response in WAIT_F.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer expired: poll the peer */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2831
/* ERTM transmit state machine: handler for the WAIT_F state (a poll is
 * outstanding and we are waiting for the peer's F-bit).  New data is
 * queued but not sent; the F-bit response returns the channel to XMIT.
 *
 * Fix: the "recv fbit" debug format used "0x2.2%x", which detaches the
 * width/precision from the conversion and prints a stray "0x2.2"; the
 * intended specifier is "0x%2.2x" as used elsewhere in this file.
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Sends RNR while locally busy */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* Re-poll the peer (RR with P=1) now that we can
			 * receive again; stay in WAIT_F for the F-bit.
			 */
			struct l2cap_ctrl local_control;
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		fallthrough;

	case L2CAP_EV_RECV_FBIT:
		if (control && control->final) {
			/* Poll answered: back to normal transmission */
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
		}
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_EV_MONITOR_TO:
		/* No F-bit yet: re-poll until max_tx is exhausted */
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
			l2cap_send_disconn_req(chan, ECONNABORTED);
		}
		break;
	default:
		break;
	}
}
2908
/* Entry point of the ERTM transmit state machine: route the event to the
 * handler for the channel's current TX state.  Events arriving in any
 * other state are silently dropped.
 */
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
	       chan, control, skbs, event, chan->tx_state);

	if (chan->tx_state == L2CAP_TX_STATE_XMIT)
		l2cap_tx_state_xmit(chan, control, skbs, event);
	else if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
		l2cap_tx_state_wait_f(chan, control, skbs, event);
}
2927
/* Feed a received frame's reqseq/F-bit into the TX state machine. */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2934
/* Feed only a received frame's F-bit into the TX state machine. */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2941
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->l2cap.chan == chan)
			continue;

		/* Best effort: a failed clone just skips this channel */
		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;
		/* recv takes ownership on success; free on rejection */
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}
}
2965
/* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling command skb: L2CAP header + command
 * header + dlen bytes of payload, fragmented over the connection MTU via
 * frag_list when necessary.  Returns NULL on allocation failure or when
 * the MTU cannot even hold the headers.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Payload that fits in the first skb after the headers */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb and every fragment already linked */
	kfree_skb(skb);
	return NULL;
}
3032
/* Parse one configuration option at *ptr, advancing *ptr past it, and
 * return the total length consumed.  Values of length 1/2/4 are decoded
 * inline into *val; any other length leaves *val pointing at the raw
 * option payload.
 *
 * NOTE(review): opt->len comes from the peer and is not bounds-checked
 * here; callers must limit the walk by the signalling PDU length —
 * verify at call sites.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Unusual length: hand back a pointer to the raw bytes */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3066
/* Append one configuration option at *ptr and advance *ptr past it.
 * val is either the immediate value (len 1/2/4, stored little-endian) or
 * a pointer to len bytes to copy.  Silently skipped if fewer than
 * L2CAP_CONF_OPT_SIZE + len bytes remain in the output buffer.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	/* Not enough room left in the response buffer: drop the option */
	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* val is a pointer to the option payload in this case */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3099
/* Append an Extended Flow Specification option for ERTM or streaming
 * mode; channels in any other mode get no EFS option.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		/* Streaming mode uses fixed best-effort parameters */
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs, size);
}
3130
/* Deferred-ack work handler: if frames have been received since the last
 * acknowledgement, send an RR/RNR now.  Drops the channel reference
 * taken when this work was scheduled.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	/* Balances the reference held for the pending work */
	l2cap_chan_put(chan);
}
3150
/* Reset sequence-number state and TX queue for a newly configured
 * channel; for ERTM mode additionally initialize the RX/TX state
 * machines and the SREJ/retransmission sequence lists.  Returns 0 or a
 * negative errno from sequence-list allocation.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Streaming mode needs none of the ERTM machinery below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Don't leak the first list on partial failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3186
/* Pick the channel mode to request: keep ERTM/streaming only when the
 * remote's feature mask advertises support, otherwise fall back to
 * basic mode.
 */
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
{
	if ((mode == L2CAP_MODE_STREAMING || mode == L2CAP_MODE_ERTM) &&
	    l2cap_mode_supported(mode, remote_feat_mask))
		return mode;

	return L2CAP_MODE_BASIC;
}
3199
__l2cap_ews_supported(struct l2cap_conn * conn)3200 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3201 {
3202 return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW);
3203 }
3204
__l2cap_efs_supported(struct l2cap_conn * conn)3205 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3206 {
3207 return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW);
3208 }
3209
/* Fill in the default ERTM retransmission and monitor timeouts in the
 * RFC option (little-endian on the wire).  The chan argument is kept for
 * signature symmetry with callers; it is not consulted here.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
}
3216
/* Settle the transmit window: use the extended control field when a
 * window larger than the default is wanted and the remote supports
 * Extended Window Size, otherwise clamp to the standard window.
 */
static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
{
	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
	    __l2cap_ews_supported(chan->conn)) {
		/* use extended control field */
		set_bit(FLAG_EXT_CTRL, &chan->flags);
		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
	} else {
		chan->tx_win = min_t(u16, chan->tx_win,
				     L2CAP_DEFAULT_TX_WINDOW);
		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	}
	chan->ack_win = chan->tx_win;
}
3231
/* Auto-select the incoming MTU from the largest ACL packet type NOT
 * excluded on this connection.  In pkt_type the 2-DH*/3-DH* bits are
 * "packet type shall NOT be used" flags, so a clear bit means the type
 * is available; checks run smallest-to-largest so the last available
 * type wins.  Each MTU is the packet's info capacity minus the 2-byte
 * payload header.
 */
static void l2cap_mtu_auto(struct l2cap_chan *chan)
{
	struct hci_conn *conn = chan->conn->hcon;

	chan->imtu = L2CAP_DEFAULT_MIN_MTU;

	/* The 2-DH1 packet has between 2 and 56 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_2DH1))
		chan->imtu = 54;

	/* The 3-DH1 packet has between 2 and 85 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_3DH1))
		chan->imtu = 83;

	/* The 2-DH3 packet has between 2 and 369 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_2DH3))
		chan->imtu = 367;

	/* The 3-DH3 packet has between 2 and 554 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_3DH3))
		chan->imtu = 552;

	/* The 2-DH5 packet has between 2 and 681 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_2DH5))
		chan->imtu = 679;

	/* The 3-DH5 packet has between 2 and 1023 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_3DH5))
		chan->imtu = 1021;
}
3274
/* Build an L2CAP_CONFIGURATION_REQ payload for @chan into @data.
 *
 * On the first request (no requests/responses exchanged yet) the
 * channel mode may first be downgraded via l2cap_select_mode() based
 * on the remote feature mask, and EFS is flagged when supported.
 * Options (MTU, RFC, EFS, EWS, FCS) are then appended according to the
 * selected mode; each l2cap_add_conf_opt() call is bounded by the
 * remaining buffer space (endptr - ptr).
 *
 * Returns the number of bytes written (request header + options).
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode (re)selection only happens on the very first request. */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A "state 2" device insists on its configured mode and
		 * skips renegotiation.
		 */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only send an MTU option when it differs from the default;
	 * an unset (zero) MTU is auto-sized from the link packet types.
	 */
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only advertise basic mode explicitly when the remote
		 * could have negotiated ERTM/streaming instead.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Cap the PDU size so a full PDU (extended header, SDU
		 * length field and FCS included) fits in the link MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		/* Window > default: send the real size via the EWS option. */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		/* Request "no FCS" when the remote supports the option and
		 * either side already opted out of the checksum.
		 */
		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		/* Window/retransmission fields are meaningless in
		 * streaming mode.
		 */
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3400
/* Parse the remote's buffered configuration request (chan->conf_req /
 * chan->conf_len) and build the matching L2CAP_CONFIGURATION_RSP into
 * @data.
 *
 * First pass walks the options, recording MTU, flush timeout, RFC, FCS
 * and EFS values (options with an unexpected length are silently
 * skipped) and echoing unknown non-hint options back with result
 * L2CAP_CONF_UNKNOWN. The channel mode is then reconciled with the
 * remote's RFC, and on success the output options (MTU, RFC, EFS) are
 * appended with the negotiated values.
 *
 * Returns the number of response bytes written, or -ECONNREFUSED when
 * negotiation cannot proceed (EWS on BR/EDR, incompatible mode/EFS).
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = 0;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* QoS option is accepted but ignored. */
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Extended window size is not accepted here;
			 * refuse the connection.
			 */
			return -ECONNREFUSED;

		default:
			/* Hint options may be ignored; anything else
			 * unknown is reported back to the sender.
			 */
			if (hint)
				break;
			result = L2CAP_CONF_UNKNOWN;
			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
			break;
		}
	}

	/* Mode reconciliation only applies to the first exchange. */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			/* Flexible: adopt whatever the remote proposed
			 * (subject to its feature mask).
			 */
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State 2 device: the mode is non-negotiable. */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Second disagreement on mode: give up. */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		/* If MTU is not provided in configure request, try adjusting it
		 * to the current output MTU if it has been set
		 *
		 * Bluetooth Core 6.1, Vol 3, Part A, Section 4.5
		 *
		 * Each configuration parameter value (if any is present) in an
		 * L2CAP_CONFIGURATION_RSP packet reflects an 'adjustment' to a
		 * configuration parameter value that has been sent (or, in case
		 * of default values, implied) in the corresponding
		 * L2CAP_CONFIGURATION_REQ packet.
		 */
		if (!mtu) {
			/* Only adjust for ERTM channels as for older modes the
			 * remote stack may not be able to detect that the
			 * adjustment causing it to silently drop packets.
			 */
			if (chan->mode == L2CAP_MODE_ERTM &&
			    chan->omtu && chan->omtu != L2CAP_DEFAULT_MTU)
				mtu = chan->omtu;
			else
				mtu = L2CAP_DEFAULT_MTU;
		}

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			/* Service types must agree unless one side is
			 * "no traffic".
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* An EWS option (if previously received) overrides
			 * the RFC window field.
			 */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote PDU size to what fits in the
			 * link MTU with headers and FCS.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (remote_efs &&
			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3643
/* Parse a configuration response (@rsp, @len bytes of options) and
 * build the follow-up configuration request into @data.
 *
 * Each option the remote adjusted (MTU, flush timeout, RFC, EWS, EFS)
 * is applied to the channel and echoed back in the new request;
 * malformed-length options are skipped. On success the channel adopts
 * the agreed mode and (for ERTM/streaming) its timeouts, MPS, window
 * and EFS parameters.
 *
 * Returns the number of request bytes written, or -ECONNREFUSED when
 * the response is incompatible (mode mismatch for a state-2 device or
 * for a basic-mode channel, or conflicting EFS service type).
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				/* Too small: mark unacceptable but keep
				 * negotiating with the minimum MTU.
				 */
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* State 2 device: the mode cannot be changed by
			 * the remote's adjustment.
			 */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			/* Service types must agree unless one side is
			 * "no traffic".
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* A basic-mode channel cannot be upgraded by the response. */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			/* Without extended control, the ack window is
			 * bounded by the RFC window field.
			 */
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}
3761
/* Build a bare configuration response (header only, no options) for
 * @chan into @data and return its length in bytes.
 */
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
				u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *end = rsp->data;

	BT_DBG("chan %p", chan);

	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(flags);

	/* No options appended, so the length is just the header. */
	return end - data;
}
3776
__l2cap_le_connect_rsp_defer(struct l2cap_chan * chan)3777 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3778 {
3779 struct l2cap_le_conn_rsp rsp;
3780 struct l2cap_conn *conn = chan->conn;
3781
3782 BT_DBG("chan %p", chan);
3783
3784 rsp.dcid = cpu_to_le16(chan->scid);
3785 rsp.mtu = cpu_to_le16(chan->imtu);
3786 rsp.mps = cpu_to_le16(chan->mps);
3787 rsp.credits = cpu_to_le16(chan->rx_credits);
3788 rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3789
3790 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3791 &rsp);
3792 }
3793
/* Per-channel callback used to verify a group of deferred ECRED
 * channels: counts channels still pending accept into *data, or sets
 * it to -ECONNREFUSED when any channel was refused.
 */
static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
{
	int *result = data;

	/* Stop once a refusal has been recorded, and ignore channels
	 * that belong to an outgoing request.
	 */
	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	if (chan->state == BT_CONNECT2) {
		/* Channel still pending accept: add to result. */
		(*result)++;
	} else if (chan->state != BT_CONNECTED) {
		/* Neither connected nor pending accept: refused. */
		*result = -ECONNREFUSED;
	}
}
3814
/* Scratch buffer for building a deferred ECRED connection response:
 * the wire-format PDU (fixed response header followed by up to
 * L2CAP_ECRED_MAX_CID destination CIDs) plus a count of how many CID
 * slots have been filled so far.
 */
struct l2cap_ecred_rsp_data {
	struct {
		struct l2cap_ecred_conn_rsp_hdr rsp;
		__le16 scid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	int count;
};
3822
/* Per-channel callback that folds one deferred ECRED channel into the
 * response being assembled in @data: appends its CID on success, or
 * deletes the channel when the aggregate result is a failure.
 */
static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_rsp_data *rsp = data;
	/* View the packed header+CID-array pair as the flexible
	 * l2cap_ecred_conn_rsp layout so dcid[] can be indexed.
	 */
	struct l2cap_ecred_conn_rsp *rsp_flex =
		container_of(&rsp->pdu.rsp, struct l2cap_ecred_conn_rsp, hdr);

	/* Check if channel for outgoing connection or if it wasn't deferred
	 * since in those cases it must be skipped.
	 */
	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags) ||
	    !test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	/* Reset ident so only one response is sent */
	chan->ident = 0;

	/* Include all channels pending with the same ident */
	if (!rsp->pdu.rsp.result)
		rsp_flex->dcid[rsp->count++] = cpu_to_le16(chan->scid);
	else
		l2cap_chan_del(chan, ECONNRESET);
}
3845
/* Send the deferred enhanced-credit-based connection response for the
 * request group identified by chan->ident.
 *
 * All channels sharing the ident are first checked; if any is still
 * pending accept the response is postponed, and if any was refused the
 * whole group is answered with L2CAP_CR_LE_AUTHORIZATION. Otherwise a
 * single response listing every accepted CID is sent.
 */
void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_rsp_data data;
	u16 id = chan->ident;
	int result = 0;

	/* No ident means no response is owed. */
	if (!id)
		return;

	BT_DBG("chan %p id %d", chan, id);

	memset(&data, 0, sizeof(data));

	data.pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.rsp.mps     = cpu_to_le16(chan->mps);
	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);

	/* Verify that all channels are ready */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);

	/* result > 0: some channel is still pending accept — wait. */
	if (result > 0)
		return;

	if (result < 0)
		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);

	/* Build response */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);

	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
		       &data.pdu);
}
3881
/* Send the deferred BR/EDR connection response (success) for @chan,
 * then kick off configuration with an initial L2CAP_CONF_REQ unless
 * one has already been sent.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];
	u8 rsp_code;

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	/* Only the first caller gets to send the config request. */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
	chan->num_conf_req++;
}
3906
/* Extract the RFC (and extended window size) parameters from a
 * successful configuration response and apply them to @chan.
 *
 * Only meaningful for ERTM/streaming channels; other modes return
 * immediately. Sane defaults are pre-loaded so a response missing the
 * RFC or EWS option still leaves the channel in a usable state.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* Ack window comes from EWS when extended control is
		 * active, from the RFC window field otherwise.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3962
/* Handle an incoming L2CAP_COMMAND_REJ.
 *
 * A "command not understood" rejection matching our outstanding
 * information request means the remote does not support it: stop the
 * info timer, mark the feature-mask exchange done and proceed with
 * starting queued channels.
 *
 * Returns 0, or -EPROTO if the PDU is too short.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	/* Only "not understood" rejections are acted upon. */
	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
		return 0;

	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
	    cmd->ident == conn->info_ident) {
		cancel_delayed_work(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
3987
/* Handle an incoming BR/EDR L2CAP connection request.
 *
 * Looks up a listening channel for the requested PSM, validates link
 * security, the dynamic CID range and CID uniqueness, then creates the
 * new channel and answers with @rsp_code carrying success, pending or
 * an error result. When the response is pending with no info, the
 * feature-mask information exchange is started; on immediate success
 * the first configuration request is sent.
 */
static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
			  u8 *data, u8 rsp_code)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid  = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    (!hci_conn_check_link_mode(conn->hcon) ||
	    !l2cap_check_enc_key_size(conn->hcon, pchan))) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Let the owning socket decide whether
				 * to accept: report pending.
				 */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security has not been satisfied yet. */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature-mask exchange still outstanding. */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* No listening channel found: nothing to unlock/release. */
	if (!pchan)
		return;

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);
}
4118
/* Signal handler for L2CAP_CONN_REQ: validate the PDU length and
 * delegate to l2cap_connect(). Returns 0 or -EPROTO on a short PDU.
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP);
	return 0;
}
4128
/* Handle an L2CAP_CONN_RSP (or create-channel response).
 *
 * Finds the channel by source CID (or by command ident when the remote
 * omitted the CID), validates the returned destination CID, then acts
 * on the result: on success move to BT_CONFIG and send the first
 * configuration request, on pending just mark the channel, and on any
 * failure tear the channel down.
 *
 * Returns 0 on success, -EPROTO on a malformed PDU, or -EBADSLT when
 * no matching channel exists or the CID clashes with an existing one.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	/* A successful response must carry a dynamic-range DCID. */
	if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EBADSLT;
	} else {
		/* No SCID echoed back: fall back to the command ident. */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EBADSLT;
	}

	/* Take a reference unless the channel is already being freed. */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Reject a DCID that is already in use on this link. */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the config request once. */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return err;
}
4206
set_default_fcs(struct l2cap_chan * chan)4207 static inline void set_default_fcs(struct l2cap_chan *chan)
4208 {
4209 /* FCS is enabled only in ERTM or streaming mode, if one or both
4210 * sides request it.
4211 */
4212 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4213 chan->fcs = L2CAP_FCS_NONE;
4214 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4215 chan->fcs = L2CAP_FCS_CRC16;
4216 }
4217
/* Send the final (success) configuration response after an EFS-related
 * pending phase, clearing the local-pending flag and marking the
 * output configuration done. @data is scratch space for the response.
 */
static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
	       flags);

	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
		       l2cap_build_conf_rsp(chan, data,
					    L2CAP_CONF_SUCCESS, flags), data);
}
4233
/* Send an L2CAP command reject with reason "invalid CID", echoing the
 * offending source/destination CID pair back to the sender.
 */
static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
				   u16 scid, u16 dcid)
{
	struct l2cap_cmd_rej_cid rej = {
		.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID),
		.scid = __cpu_to_le16(scid),
		.dcid = __cpu_to_le16(dcid),
	};

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
4245
/* Handle an incoming L2CAP_CONFIGURATION_REQ.
 *
 * Options may arrive split across several requests (continuation
 * flag); fragments are accumulated in chan->conf_req until complete,
 * then parsed by l2cap_parse_conf_req() and answered. When both
 * directions of configuration are done the channel is initialised
 * (ERTM/streaming) and marked ready; otherwise our own configuration
 * request is sent if still outstanding.
 *
 * Returns 0 or -EPROTO on a short PDU; other errors come from
 * l2cap_ertm_init() and trigger a disconnect.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only legal in these states. */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	/* Saturating counter; compared against elsewhere to bound the
	 * number of negotiation rounds.
	 */
	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
		chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finalise the channel. */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
	}

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4354
/* Handle an incoming L2CAP Configure Response on a BR/EDR link.
 *
 * Looks up the channel by source CID and acts on the response code:
 * success, pending (EFS), unknown/unacceptable options (retry with an
 * adjusted request), or any other result (disconnect).  Returns 0 or a
 * negative errno; -EPROTO if the packet is shorter than the fixed
 * response header.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);	/* length of the option data */
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* On success the channel comes back locked with a reference held;
	 * both are dropped at "done" below.
	 */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		/* Retry with adjusted options, but give up once the
		 * peer has rejected L2CAP_CONF_MAX_CONF_RSP attempts.
		 */
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More option data follows in a continuation response */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: bring the channel up */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4461
/* Handle an incoming L2CAP Disconnection Request: acknowledge it and
 * tear the matching channel down.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_chan *chan;
	u16 dcid, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The request's DCID names our local (source) CID */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	{
		struct l2cap_disconn_rsp rsp = {
			.dcid = cpu_to_le16(chan->scid),
			.scid = cpu_to_le16(chan->dcid),
		};

		l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP,
			       sizeof(rsp), &rsp);
	}

	chan->ops->set_shutdown(chan);

	l2cap_chan_del(chan, ECONNRESET);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4500
/* Handle an incoming L2CAP Disconnection Response: complete a
 * disconnect we initiated earlier.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	struct l2cap_chan *chan;
	u16 dcid, scid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	/* Only tear the channel down if we actually sent a Disconnect
	 * Request for it; otherwise just drop the lock and reference.
	 */
	if (chan->state == BT_DISCONN) {
		l2cap_chan_del(chan, 0);
		chan->ops->close(chan);
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4537
/* Handle an incoming L2CAP Information Request and answer it with the
 * feature mask, the fixed channel map, or a not-supported response.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	switch (type) {
	case L2CAP_IT_FEAT_MASK: {
		u8 buf[8];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		u32 feat_mask = l2cap_feat_mask;

		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* ERTM/streaming/FCS support is advertised unless disabled */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING |
				     L2CAP_FEAT_FCS;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
		break;
	}
	case L2CAP_IT_FIXED_CHAN: {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		rsp->data[0] = conn->local_fixed_chan;
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
		break;
	}
	default: {
		struct l2cap_info_rsp rsp;

		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
		break;
	}
	}

	return 0;
}
4585
/* Handle an incoming L2CAP Information Response (BR/EDR).
 *
 * Drives the two-step discovery state machine: first the extended
 * feature mask, then (if the peer advertises fixed channel support)
 * the fixed channel map.  Once discovery is complete, the info timer
 * is cancelled and pending channel setup is resumed via
 * l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused; mark discovery done and proceed anyway */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Second discovery step: ask for the fixed
			 * channel map under a fresh ident.
			 */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4648
/* Handle an LE Connection Parameter Update Request from the peripheral.
 *
 * Validates the proposed parameters, always answers with an
 * accepted/rejected response, and on acceptance schedules the actual
 * LE connection update and records the parameters via mgmt.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	struct hci_conn *hcon = conn->hcon;
	u16 min, max, latency, to_multiplier;
	int err;

	/* Only the central can be asked to update connection parameters */
	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	rsp.result = err ? cpu_to_le16(L2CAP_CONN_PARAM_REJECTED) :
			   cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);
	}

	return 0;
}
4698
/* Handle an incoming LE Credit Based Connection Response.
 *
 * Matches the response to the pending channel via the command ident.
 * On success the negotiated parameters are committed and the channel
 * made ready; on an authentication/encryption failure a security
 * upgrade is requested so the connect request can be retried.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* On success MTU/MPS must be at least 23 octets and the DCID
	 * must fall within the LE dynamic CID range.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					      dcid < L2CAP_CID_DYN_START ||
					      dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Refuse a DCID already in use by another channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Ask for one level more than the link currently has */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

	return err;
}
4778
/* Release the command identifier we allocated for a request once the
 * matching response arrives.  Non-response opcodes are ignored.
 */
static void l2cap_put_ident(struct l2cap_conn *conn, u8 code, u8 id)
{
	switch (code) {
	case L2CAP_COMMAND_REJ:
	case L2CAP_CONN_RSP:
	case L2CAP_CONF_RSP:
	case L2CAP_DISCONN_RSP:
	case L2CAP_ECHO_RSP:
	case L2CAP_INFO_RSP:
	case L2CAP_CONN_PARAM_UPDATE_RSP:
	case L2CAP_ECRED_CONN_RSP:
	case L2CAP_ECRED_RECONF_RSP:
		break;
	default:
		return;
	}

	/* Look the id up first: the remote may send bogus idents and
	 * ida_free() would generate a warning for an unallocated id.
	 */
	if (ida_find_first_range(&conn->tx_ida, id, id) < 0)
		return;

	ida_free(&conn->tx_ida, id);
}
4798
/* Dispatch one BR/EDR signaling command to its handler.
 *
 * Returns a negative errno for malformed or unknown commands so the
 * caller can emit a command reject; handlers whose return value is
 * ignored here report their own errors via the protocol.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	/* Release our ident if this is a response to one of our requests */
	l2cap_put_ident(conn, cmd->code, cmd->ident);

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back to the sender */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
4859
/* Handle an incoming LE Credit Based Connection Request.
 *
 * Validates the parameters and the requested PSM, looks for a
 * listening channel, checks security and CID constraints, then either
 * creates the new channel (possibly deferring the accept to the
 * socket owner) or sends an error response.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm = req->psm;
	/* dcid/credits stay 0 for every error response path */
	dcid = 0;
	credits = 0;

	/* LE CoC requires MTU and MPS of at least 23 octets */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = pchan->sec_level == BT_SECURITY_MEDIUM ?
			 L2CAP_CR_LE_ENCRYPTION : L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if Key Size is sufficient for the security level */
	if (!l2cap_check_enc_key_size(conn->hcon, pchan)) {
		result = L2CAP_CR_LE_BAD_KEY_SIZE;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

	/* Deferred accept: the response is sent later on accept/reject */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5004
/* Handle an LE Flow Control Credit packet: add the granted credits to
 * the channel's transmit budget and resume any stalled transmission.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt = (struct l2cap_le_credits *) data;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	cid = __le16_to_cpu(pkt->cid);
	credits = __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	/* The total credit count must never exceed the protocol maximum */
	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		goto unlock;
	}

	chan->tx_credits += credits;

	/* Resume sending */
	l2cap_le_flowctl_send(chan);

	if (chan->tx_credits)
		chan->ops->resume(chan);

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5051
/* Handle an Enhanced Credit Based (ECRED) Connection Request, which
 * may set up several channels on the same PSM in one command.
 *
 * One response PDU is built that carries a DCID entry (0 on per-CID
 * failure) for every SCID in the request plus a single overall result.
 * If the accept is deferred to the socket owner, no response is sent
 * here.
 */
static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_req *req = (void *) data;
	DEFINE_RAW_FLEX(struct l2cap_ecred_conn_rsp, pdu, dcid, L2CAP_ECRED_MAX_CID);
	struct l2cap_chan *chan, *pchan;
	u16 mtu, mps;
	__le16 psm;
	u8 result, rsp_len = 0;
	int i, num_scid;
	bool defer = false;

	if (!enable_ecred)
		return -EINVAL;

	memset(pdu, 0, sizeof(*pdu));

	/* The trailing SCID list must be a whole number of u16 entries */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	/* Always respond with the same number of scids as in the request */
	rsp_len = cmd_len;

	if (num_scid > L2CAP_ECRED_MAX_CID) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	psm  = req->psm;

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = pchan->sec_level == BT_SECURITY_MEDIUM ?
			L2CAP_CR_LE_ENCRYPTION : L2CAP_CR_LE_AUTHENTICATION;
		goto unlock;
	}

	/* Check if the listening channel has set an output MTU then the
	 * requested MTU shall be less than or equal to that value.
	 */
	if (pchan->omtu && mtu < pchan->omtu) {
		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
		goto unlock;
	}

	result = L2CAP_CR_LE_SUCCESS;

	/* Per-SCID setup; individual failures leave a 0 DCID in the
	 * response and update the overall result, but do not abort the
	 * remaining entries.
	 */
	for (i = 0; i < num_scid; i++) {
		u16 scid = __le16_to_cpu(req->scid[i]);

		BT_DBG("scid[%d] 0x%4.4x", i, scid);

		pdu->dcid[i] = 0x0000;

		/* Check for valid dynamic CID range */
		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
			result = L2CAP_CR_LE_INVALID_SCID;
			continue;
		}

		/* Check if we already have channel with that dcid */
		if (__l2cap_get_chan_by_dcid(conn, scid)) {
			result = L2CAP_CR_LE_SCID_IN_USE;
			continue;
		}

		chan = pchan->ops->new_connection(pchan);
		if (!chan) {
			result = L2CAP_CR_LE_NO_MEM;
			continue;
		}

		bacpy(&chan->src, &conn->hcon->src);
		bacpy(&chan->dst, &conn->hcon->dst);
		chan->src_type = bdaddr_src_type(conn->hcon);
		chan->dst_type = bdaddr_dst_type(conn->hcon);
		chan->psm  = psm;
		chan->dcid = scid;
		chan->omtu = mtu;
		chan->remote_mps = mps;

		__l2cap_chan_add(conn, chan);

		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));

		/* Init response: MTU/MPS/credits are filled once, from
		 * the first successfully created channel.
		 */
		if (!pdu->credits) {
			pdu->mtu = cpu_to_le16(chan->imtu);
			pdu->mps = cpu_to_le16(chan->mps);
			pdu->credits = cpu_to_le16(chan->rx_credits);
		}

		pdu->dcid[i] = cpu_to_le16(chan->scid);

		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

		chan->ident = cmd->ident;
		chan->mode = L2CAP_MODE_EXT_FLOWCTL;

		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
			l2cap_state_change(chan, BT_CONNECT2);
			defer = true;
			chan->ops->defer(chan);
		} else {
			l2cap_chan_ready(chan);
		}
	}

unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

response:
	pdu->result = cpu_to_le16(result);

	/* Deferred accept: respond later from the accept/reject path */
	if (defer)
		return 0;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
		       sizeof(*pdu) + rsp_len, pdu);

	return 0;
}
5213
/* Handle an Enhanced Credit Based (ECRED) Connection Response.
 *
 * Walks every channel still pending on this command ident and consumes
 * one DCID from the response per channel, committing the negotiated
 * parameters, retrying after a security upgrade, or deleting the
 * channel depending on the result and the DCID value.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;	/* cursor into the response's DCID array */

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	/* cmd_len now tracks the remaining (unconsumed) DCID bytes */
	cmd_len -= sizeof(*rsp);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	return err;
}
5323
/* Handle an L2CAP_CREDIT_BASED_RECONFIGURE_REQ (Enhanced Credit Based
 * Flow Control mode).
 *
 * Validates the requested MTU/MPS against every channel listed in the
 * request and commits the new values only if all of them are
 * acceptable, then sends a single reconfigure response with the
 * overall result.
 */
static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					 u8 *data)
{
	struct l2cap_ecred_reconf_req *req = (void *) data;
	struct l2cap_ecred_reconf_rsp rsp;
	u16 mtu, mps, result;
	struct l2cap_chan *chan[L2CAP_ECRED_MAX_CID] = {};
	int i, num_scid;

	if (!enable_ecred)
		return -EINVAL;

	/* The trailing SCID list must be a whole number of u16 entries */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_RECONF_INVALID_CID;
		goto respond;
	}

	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);

	BT_DBG("mtu %u mps %u", mtu, mps);

	if (mtu < L2CAP_ECRED_MIN_MTU) {
		result = L2CAP_RECONF_INVALID_PARAMS;
		goto respond;
	}

	if (mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_RECONF_INVALID_PARAMS;
		goto respond;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	if (num_scid > L2CAP_ECRED_MAX_CID) {
		result = L2CAP_RECONF_INVALID_PARAMS;
		goto respond;
	}

	result = L2CAP_RECONF_SUCCESS;

	/* Check if each SCID, MTU and MPS are valid */
	for (i = 0; i < num_scid; i++) {
		u16 scid;

		scid = __le16_to_cpu(req->scid[i]);
		if (!scid) {
			result = L2CAP_RECONF_INVALID_CID;
			goto respond;
		}

		chan[i] = __l2cap_get_chan_by_dcid(conn, scid);
		if (!chan[i]) {
			result = L2CAP_RECONF_INVALID_CID;
			goto respond;
		}

		/* The MTU field shall be greater than or equal to the greatest
		 * current MTU size of these channels.
		 */
		if (chan[i]->omtu > mtu) {
			BT_ERR("chan %p decreased MTU %u -> %u", chan[i],
			       chan[i]->omtu, mtu);
			result = L2CAP_RECONF_INVALID_MTU;
			goto respond;
		}

		/* If more than one channel is being configured, the MPS field
		 * shall be greater than or equal to the current MPS size of
		 * each of these channels. If only one channel is being
		 * configured, the MPS field may be less than the current MPS
		 * of that channel.
		 *
		 * An MPS equal to the current value is explicitly allowed,
		 * so only reject a strict decrease.
		 */
		if (chan[i]->remote_mps > mps && i) {
			BT_ERR("chan %p decreased MPS %u -> %u", chan[i],
			       chan[i]->remote_mps, mps);
			result = L2CAP_RECONF_INVALID_MPS;
			goto respond;
		}
	}

	/* Commit the new MTU and MPS values after checking they are valid */
	for (i = 0; i < num_scid; i++) {
		chan[i]->omtu = mtu;
		chan[i]->remote_mps = mps;
	}

respond:
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
		       &rsp);

	return 0;
}
5421
/* Handle an L2CAP_CREDIT_BASED_RECONFIGURE_RSP.
 *
 * A non-zero result means the peer rejected our reconfigure request;
 * tear down every channel still pending on this command ident.
 */
static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					 u8 *data)
{
	struct l2cap_chan *chan, *tmp;
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	u16 result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	result = __le16_to_cpu(rsp->result);

	/* Log the CPU-order value: passing the raw __le16 would print a
	 * byte-swapped result on big-endian hosts.
	 */
	BT_DBG("result 0x%4.4x", result);

	/* Success requires no action */
	if (!result)
		return 0;

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		if (chan->ident != cmd->ident)
			continue;

		l2cap_chan_del(chan, ECONNRESET);
	}

	return 0;
}
5449
/* Handle an L2CAP Command Reject on an LE link.
 *
 * If the rejected identifier matches a channel waiting on a request we
 * sent, that connection attempt has failed: delete the channel.
 */
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		goto done;

	/* Only take a reference if the channel is not already on its
	 * way to being freed.
	 */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan)
		goto done;

	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, ECONNREFUSED);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

done:
	return 0;
}
5476
/* Dispatch a single LE signaling command to its handler based on
 * cmd->code.  Returns 0 or a negative error from the handler; an
 * unknown code yields -EINVAL so the caller can send a Command Reject.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	/* NOTE(review): presumably releases the ident reserved for this
	 * code/ident pair — confirm against l2cap_put_ident()'s definition.
	 */
	l2cap_put_ident(conn, cmd->code, cmd->ident);

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		/* Return value intentionally ignored: a malformed reject
		 * must not itself trigger another reject.
		 */
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		/* Nothing to do for the response */
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_REQ:
		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_RSP:
		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_REQ:
		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_RSP:
		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5541
/* Process one inbound skb on the LE signaling channel.
 *
 * Validates the link type and command header, dispatches exactly one
 * command (LE signaling carries a single command per PDU), and answers
 * any handler failure with a "command not understood" reject.  The skb
 * is always consumed.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	/* LE signaling is only valid on an LE link */
	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* The declared length must match the remaining payload exactly,
	 * and ident 0 is reserved.
	 */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): message text is historical — err may come
		 * from any command handler, not only a link-type check.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5582
/* Send a "command not understood" reject for the given ident. */
static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
{
	struct l2cap_cmd_rej_unk rej = {
		.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD),
	};

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
5590
/* Process one inbound skb on the BR/EDR signaling channel.
 *
 * Unlike LE, a BR/EDR signaling PDU may carry several commands, so the
 * skb is consumed command-by-command.  A malformed command is answered
 * with a reject and skipped without aborting the remaining commands.
 * The skb is always consumed.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	/* Mirror raw signaling traffic to raw sockets (sniffing) */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		/* Declared length longer than what is left, or reserved
		 * ident 0: reject, consume what we can, and continue with
		 * any following command.
		 */
		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			l2cap_sig_send_rej(conn, cmd->ident);
			skb_pull(skb, len > skb->len ? skb->len : len);
			continue;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			/* NOTE(review): historical message text — err may
			 * come from any command handler.
			 */
			BT_ERR("Wrong link type (%d)", err);
			l2cap_sig_send_rej(conn, cmd->ident);
		}

		skb_pull(skb, len);
	}

	/* Trailing bytes too short to form a command header */
	if (skb->len > 0) {
		BT_DBG("corrupted command");
		l2cap_sig_send_rej(conn, 0);
	}

drop:
	kfree_skb(skb);
}
5638
/* Verify the CRC16 FCS trailer of a received ERTM/streaming frame.
 *
 * The control-field header (enhanced or extended, depending on
 * FLAG_EXT_CTRL) sits immediately before skb->data, so the checksum
 * covers hdr_size bytes before the payload plus the payload itself.
 * The FCS is trimmed from the skb first; the trimmed bytes are still
 * present in the underlying buffer at skb->data + skb->len and are
 * read back for comparison.
 *
 * Returns 0 when the FCS matches or FCS is not in use on this channel,
 * -EBADMSG on mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5659
/* Answer a poll (P=1) by sending a frame with the F-bit set.
 *
 * If we are locally busy an RNR carries the F-bit; otherwise pending
 * I-frames are flushed (one of which may carry it), and if the F-bit
 * still has not gone out an explicit RR is sent.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Locally busy: report it with an RNR carrying the F-bit */
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just left busy state: restart the retransmission timer
	 * if frames are still awaiting acknowledgment.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5693
/* Append @new_frag to @skb's frag_list and account for its length.
 *
 * @last_frag caches the current tail of the frag_list so appending is
 * O(1); it is updated to point at @new_frag.  Note the tail-link store
 * must happen even when this is the first fragment, since *last_frag
 * then points at skb itself.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5712
/* Reassemble an SDU from one or more I-frames according to the SAR
 * bits in @control.  On complete reassembly the SDU is handed to
 * chan->ops->recv(), which takes ownership of the skb.
 *
 * Returns 0 on success (including "fragment stored, more expected"),
 * -EMSGSIZE when the announced SDU length exceeds the channel MTU, and
 * -EINVAL for SAR sequencing violations.  On any error both the passed
 * skb and any partially assembled SDU are freed.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	/* Each case must explicitly clear this; falling out of the switch
	 * without doing so reports a SAR violation.
	 */
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A reassembly must not already be in progress */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* A reassembly must not already be in progress */
		if (chan->sdu)
			break;

		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		/* The start fragment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment already holding the whole SDU is a SAR
		 * violation (it should have been sent unsegmented).
		 */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* Ownership moved into chan->sdu; don't free skb below */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		/* A reassembly must already be in progress */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation may not complete or overflow the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		/* A reassembly must already be in progress */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The end fragment must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* kfree_skb() tolerates NULL, so this is safe whichever
		 * of the two pointers is still live.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5797
/* Re-segment queued outgoing data after an MTU change (e.g. channel
 * move).  Currently a no-op placeholder that always succeeds.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5803
l2cap_chan_busy(struct l2cap_chan * chan,int busy)5804 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5805 {
5806 u8 event;
5807
5808 if (chan->mode != L2CAP_MODE_ERTM)
5809 return;
5810
5811 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5812 l2cap_tx(chan, NULL, NULL, event);
5813 }
5814
/* Drain in-sequence frames buffered in the SREJ queue into the SDU
 * reassembler, advancing buffer_seq, until a sequence gap, local-busy,
 * or a reassembly error stops us.  Once the queue empties, leave
 * SREJ_SENT state and acknowledge.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap: the next expected frame has not arrived yet */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		/* All requested retransmissions received: back to RECV */
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5848
/* Handle a received SREJ S-frame: the peer requests selective
 * retransmission of the single frame numbered control->reqseq.
 *
 * Disconnects on an invalid reqseq (nothing with that number is
 * outstanding) or when the frame's retry budget is exhausted.  With
 * P=1 the retransmission must carry the F-bit; without P, CONN_SREJ_ACT
 * suppresses a duplicate retransmission of the frame we already resent
 * while waiting for the F-bit.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq was never sent: protocol error */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retries */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* The retransmission answering a poll carries the F-bit */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only if this F-bit answers the
			 * SREJ we already acted on for this same reqseq.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5906
/* Handle a received REJ S-frame: the peer requests retransmission of
 * all frames starting at control->reqseq.
 *
 * Disconnects on an invalid reqseq or an exhausted retry budget.
 * CONN_REJ_ACT suppresses a second bulk retransmission when the F-bit
 * response to our poll arrives after we already retransmitted.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq was never sent: protocol error */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retries */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Retransmit only if this F-bit does not answer a REJ we
		 * already acted on.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5943
/* Classify a received I-frame's txseq relative to the expected
 * sequence number and the TX window, so the RX state machine can
 * decide whether the frame is expected, a duplicate, evidence of
 * missing frames, or invalid.  Returns one of the L2CAP_TXSEQ_*
 * classifications.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	/* Extra classifications apply while SREJs are outstanding */
	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq earlier than expected_tx_seq: already received */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6029
/* ERTM RX state machine handler for the normal RECV state.
 *
 * Processes incoming I-frames (delivering, queueing for SREJ, or
 * dropping them according to their txseq classification) and RR / RNR /
 * REJ / SREJ S-frame events.  Takes ownership of @skb: any frame not
 * queued or consumed is freed before returning.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	struct l2cap_ctrl local_control;
	int err = 0;
	/* Set when skb ownership has been handed off (delivered or queued) */
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu may free skb, hence invalidate
			 * control, so make a copy in advance to use it after
			 * l2cap_reassemble_sdu returns and to avoid the race
			 * condition, for example:
			 *
			 * The current thread calls:
			 *   l2cap_reassemble_sdu
			 *     chan->ops->recv == l2cap_sock_recv_cb
			 *       __sock_queue_rcv_skb
			 * Another thread calls:
			 *   bt_sock_recvmsg
			 *     skb_recv_datagram
			 *     skb_free_datagram
			 * Then the current thread tries to access control, but
			 * it was freed by skb_free_datagram.
			 */
			local_control = *control;
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (local_control.final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					local_control.final = 0;
					l2cap_retransmit_all(chan, &local_control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; only ack state is of interest */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Retransmit unless this F-bit answers a REJ we
			 * already acted on.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	/* Free any frame whose ownership was not handed off above */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6181
/* ERTM RX state machine handler for the SREJ_SENT state (one or more
 * selective-reject requests are outstanding).
 *
 * Incoming I-frames are buffered in srej_q until the requested
 * retransmissions fill the gaps; l2cap_rx_queued_iframes() then drains
 * the queue in order.  Takes ownership of @skb: any frame not queued
 * is freed before returning.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	/* Set when skb ownership has been handed off (queued) */
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* Head-of-list retransmission arrived: try to
			 * deliver everything now in sequence.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Retransmit unless this F-bit answers a REJ we
			 * already acted on.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* Free any frame whose ownership was not handed off above */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6324
l2cap_finish_move(struct l2cap_chan * chan)6325 static int l2cap_finish_move(struct l2cap_chan *chan)
6326 {
6327 BT_DBG("chan %p", chan);
6328
6329 chan->rx_state = L2CAP_RX_STATE_RECV;
6330 chan->conn->mtu = chan->conn->hcon->mtu;
6331
6332 return l2cap_resegment(chan);
6333 }
6334
/* ERTM RX state machine handler for the WAIT_P state: after a channel
 * move we are waiting for a frame with the P-bit set.
 *
 * A non-poll frame is a protocol error (-EPROTO).  On poll, the TX side
 * is rewound to the peer's acknowledged position, the move is finished,
 * the poll is answered with an F-bit frame, and the remaining S-frame
 * event is re-run through the RECV handler.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* Only S-frames are valid while waiting for the poll */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6372
/* ERTM RX state machine handler for the WAIT_F state: after a channel
 * move we are waiting for a frame with the F-bit set.
 *
 * A non-final frame is a protocol error (-EPROTO).  On F=1, the TX side
 * is rewound to the peer's acknowledged position, the new link MTU is
 * adopted, queued data is re-segmented, and the frame is then processed
 * normally by the RECV handler.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;
	chan->conn->mtu = chan->conn->hcon->mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6406
__valid_reqseq(struct l2cap_chan * chan,u16 reqseq)6407 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6408 {
6409 /* Make sure reqseq is for a packet that has been sent but not acked */
6410 u16 unacked;
6411
6412 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6413 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6414 }
6415
/* Entry point of the ERTM RX state machine: validate the frame's
 * reqseq against the unacked window, then dispatch to the handler for
 * the channel's current RX state.  An out-of-window reqseq is a
 * protocol violation and disconnects the channel.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (__valid_reqseq(chan, control->reqseq)) {
		switch (chan->rx_state) {
		case L2CAP_RX_STATE_RECV:
			err = l2cap_rx_state_recv(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_SREJ_SENT:
			err = l2cap_rx_state_srej_sent(chan, control, skb,
						       event);
			break;
		case L2CAP_RX_STATE_WAIT_P:
			err = l2cap_rx_state_wait_p(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_WAIT_F:
			err = l2cap_rx_state_wait_f(chan, control, skb, event);
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return err;
}
6452
/* Receive an I-frame in streaming mode: no retransmission, so an
 * expected txseq is delivered to the reassembler and anything else
 * discards both the frame and any partial SDU.  Sequence tracking is
 * resynchronized to the received txseq either way.  Always returns 0.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
	 * the txseq field in advance to use it after l2cap_reassemble_sdu
	 * returns and to avoid the race condition, for example:
	 *
	 * The current thread calls:
	 *   l2cap_reassemble_sdu
	 *     chan->ops->recv == l2cap_sock_recv_cb
	 *       __sock_queue_rcv_skb
	 * Another thread calls:
	 *   bt_sock_recvmsg
	 *     skb_recv_datagram
	 *     skb_free_datagram
	 * Then the current thread tries to access control, but it was freed by
	 * skb_free_datagram.
	 */
	u16 txseq = control->txseq;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Gap or duplicate: abandon any partial SDU and the frame */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize to the frame we just saw */
	chan->last_acked_seq = txseq;
	chan->expected_tx_seq = __next_seq(chan, txseq);

	return 0;
}
6504
/* Receive one ERTM/streaming-mode PDU on a channel: unpack the control
 * field, verify FCS and payload length, validate the F/P bits for the
 * current TX state, and feed the frame into the appropriate RX path
 * (ERTM state machine or streaming receive).  Consumes @skb and always
 * returns 0; protocol violations disconnect the channel.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Account for the SDU-length and FCS fields when checking the
	 * payload against the negotiated MPS.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	/* Give the channel owner (e.g. a socket filter) a veto */
	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps the 2-bit super field to the RX event codes */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6597
/* Return LE flow-control credits to the remote sender when the credits
 * it currently holds (chan->rx_credits) have dropped below the target
 * computed by l2cap_le_rx_credits().  Sends an L2CAP_LE_CREDITS PDU
 * carrying the difference and accounts for it in chan->rx_credits.
 */
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits = l2cap_le_rx_credits(chan);

	/* Remote still holds enough credits: nothing to return */
	if (chan->rx_credits >= return_credits)
		return;

	return_credits -= chan->rx_credits;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	chan->rx_credits += return_credits;

	pkt.cid = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
6620
/* Record how much receive buffer space is available on @chan and, when
 * the channel is connected, top up the remote's LE flow-control credits
 * to match.  Calling with an unchanged value is a no-op.
 */
void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail)
{
	if (rx_avail == chan->rx_avail)
		return;

	BT_DBG("chan %p has %zd bytes avail for rx", chan, rx_avail);

	chan->rx_avail = rx_avail;

	if (chan->state != BT_CONNECTED)
		return;

	l2cap_chan_le_send_credits(chan);
}
6633
/* Deliver a fully reassembled LE/ECRED SDU to the upper layer and then
 * replenish the remote's credits.  A delivery failure while rx_avail is
 * bounded (!= -1) means the receive buffer really is full — something
 * the credit scheme should have prevented — so the channel is torn down.
 */
static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);

	/* Wait recv to confirm reception before updating the credits */
	err = chan->ops->recv(chan, skb);

	if (err < 0 && chan->rx_avail != -1) {
		BT_ERR("Queueing received LE L2CAP data failed");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return err;
	}

	/* Update credits whenever an SDU is received */
	l2cap_chan_le_send_credits(chan);

	return err;
}
6654
/* Handle one incoming PDU on an LE/ECRED flow-control channel: consume
 * a credit, then either deliver a complete SDU or accumulate the
 * fragment in chan->sdu until chan->sdu_len bytes are collected.
 * Except for the credit/MTU failures at the top, always returns 0
 * because from that point the skb is owned (and freed) here.
 */
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	/* A peer sending without credits violates the protocol */
	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("chan %p: rx_credits %u -> %u",
	       chan, chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU carries the total SDU length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Unfragmented SDU: deliver immediately */
		if (skb->len == sdu_len)
			return l2cap_ecred_recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* Continuation fragment: append to the SDU under reassembly */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6759
/* Dispatch an inbound PDU on a channel identified by @cid,
 * demultiplexing by the channel's operating mode.  The channel is
 * looked up locked and with a reference held, and both are released
 * through the drop/done labels on every path.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* recv() returning 0 means it took ownership of the skb */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
6825
/* Deliver a connectionless PDU to the global channel bound to @psm, if
 * any.  Only valid on BR/EDR (ACL) links.  The remote address and PSM
 * are stashed in the skb control block so recvmsg() can fill msg_name.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	l2cap_chan_lock(chan);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* recv() returning 0 means it took ownership of the skb */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6866
/* Entry point for a complete L2CAP frame: validate the header length,
 * then demultiplex on the CID to the signaling, connectionless or data
 * channel handlers.  Frames arriving before the HCI link is fully up
 * are parked on conn->pending_rx and replayed by process_pending_rx().
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Advertised length must match the actual payload */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6921
process_pending_rx(struct work_struct * work)6922 static void process_pending_rx(struct work_struct *work)
6923 {
6924 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6925 pending_rx_work);
6926 struct sk_buff *skb;
6927
6928 BT_DBG("");
6929
6930 mutex_lock(&conn->lock);
6931
6932 while ((skb = skb_dequeue(&conn->pending_rx)))
6933 l2cap_recv_frame(conn, skb);
6934
6935 mutex_unlock(&conn->lock);
6936 }
6937
/* Get-or-create the l2cap_conn attached to @hcon.  Allocates an HCI
 * channel and the conn structure, initialises its lock, lists, work
 * items and timers, and links it to the hci_conn.  Returns NULL on
 * allocation failure; an already existing conn is returned as-is.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc_obj(*conn);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	/* Hold the hci_conn for as long as the l2cap_conn exists */
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	conn->mtu = hcon->mtu;
	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* Advertise the BR/EDR SMP fixed channel only when it is usable */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
	ida_init(&conn->tx_ida);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_DELAYED_WORK(&conn->id_addr_timer, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
6989
is_valid_psm(u16 psm,u8 dst_type)6990 static bool is_valid_psm(u16 psm, u8 dst_type)
6991 {
6992 if (!psm)
6993 return false;
6994
6995 if (bdaddr_type_is_le(dst_type))
6996 return (psm <= 0x00ff);
6997
6998 /* PSM must be odd and lsb of upper byte must be 0 */
6999 return ((psm & 0x0101) == 0x0001);
7000 }
7001
/* Context passed to l2cap_chan_by_pid() when counting deferred ECRED
 * channels that belong to the same process/PSM as @chan.
 */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* channel being connected; excluded from the count */
	struct pid *pid;		/* owning process to match against */
	int count;			/* number of matching channels found */
};
7007
l2cap_chan_by_pid(struct l2cap_chan * chan,void * data)7008 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7009 {
7010 struct l2cap_chan_data *d = data;
7011 struct pid *pid;
7012
7013 if (chan == d->chan)
7014 return;
7015
7016 if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7017 return;
7018
7019 pid = chan->ops->get_peer_pid(chan);
7020
7021 /* Only count deferred channels with the same PID/PSM */
7022 if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7023 chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
7024 return;
7025
7026 d->count++;
7027 }
7028
/* Initiate an outgoing L2CAP connection on @chan towards @dst/@dst_type
 * using @psm (connection-oriented) or @cid (fixed channels).  Finds a
 * route, validates the requested mode and channel state, creates or
 * reuses the ACL/LE link, attaches the channel to the l2cap_conn and
 * starts the connect/security procedure.  Returns 0 on success (or if
 * a connect is already in progress) and a negative errno otherwise.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type, u16 timeout)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* RAW channels may connect without a valid PSM or CID */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Reject modes that are disabled by module parameters */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising, connect directly instead of via scan */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type, false,
					      chan->sec_level, timeout,
					      HCI_ROLE_SLAVE, 0, 0);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level, timeout,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN, timeout);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check if there isn't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->lock);
	l2cap_chan_lock(chan);

	/* A fixed CID must not already be in use on this connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
7212 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7213
l2cap_ecred_reconfigure(struct l2cap_chan * chan)7214 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
7215 {
7216 struct l2cap_conn *conn = chan->conn;
7217 DEFINE_RAW_FLEX(struct l2cap_ecred_reconf_req, pdu, scid, 1);
7218
7219 pdu->mtu = cpu_to_le16(chan->imtu);
7220 pdu->mps = cpu_to_le16(chan->mps);
7221 pdu->scid[0] = cpu_to_le16(chan->scid);
7222
7223 chan->ident = l2cap_get_ident(conn);
7224
7225 l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
7226 sizeof(pdu), &pdu);
7227 }
7228
/* Raise the incoming MTU of an ECRED channel to @mtu and notify the
 * remote via a reconfigure request.  Shrinking the MTU is rejected
 * with -EINVAL.
 */
int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
{
	if (mtu < chan->imtu)
		return -EINVAL;

	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);

	chan->imtu = mtu;
	l2cap_ecred_reconfigure(chan);

	return 0;
}
7242
7243 /* ---- L2CAP interface with lower layer (HCI) ---- */
7244
/* HCI asks whether an incoming ACL connection from @bdaddr should be
 * accepted.  Scan all listening channels: listeners bound to this
 * adapter's own address take precedence over wildcard (BDADDR_ANY)
 * listeners.  Returns HCI link-mode flags (accept/master) or 0.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&c->src, &hdev->bdaddr)) {
			/* Exact match on the local adapter address */
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&c->src, BDADDR_ANY)) {
			/* Wildcard listener: used only if no exact match */
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}
7273
7274 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7275 * from an existing channel in the list or from the beginning of the
7276 * global list (by passing NULL as first parameter).
7277 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	/* Resume right after the previous hit, or start at the head */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Must be bound to our source address or be a wildcard */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* Returned channel carries a reference (may be NULL if the
		 * refcount already hit zero); caller drops it with
		 * l2cap_chan_put().
		 */
		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7309
/* HCI callback: an ACL or LE link finished establishing.  On failure
 * tear the l2cap_conn down; on success create/attach the conn, then
 * walk all listening fixed channels and offer each a child channel on
 * this link before declaring the connection ready.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Advance before dropping our ref on the current channel */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7370
l2cap_disconn_ind(struct hci_conn * hcon)7371 int l2cap_disconn_ind(struct hci_conn *hcon)
7372 {
7373 struct l2cap_conn *conn = hcon->l2cap_data;
7374
7375 BT_DBG("hcon %p", hcon);
7376
7377 if (!conn)
7378 return HCI_ERROR_REMOTE_USER_TERM;
7379 return conn->disc_reason;
7380 }
7381
/* HCI callback: an ACL or LE link went down — tear down the attached
 * l2cap_conn; other link types are ignored.
 */
static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	switch (hcon->type) {
	case ACL_LINK:
	case LE_LINK:
		BT_DBG("hcon %p reason %d", hcon, reason);
		l2cap_conn_del(hcon, bt_to_errno(reason));
		break;
	default:
		break;
	}
}
7391
/* React to an encryption change on a connection-oriented channel: a
 * MEDIUM-security channel arms/disarms the encryption timer, while a
 * HIGH/FIPS channel is closed outright if encryption was dropped.
 */
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return;

	switch (chan->sec_level) {
	case BT_SECURITY_MEDIUM:
		if (!encrypt)
			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
		else
			__clear_chan_timer(chan);
		break;
	case BT_SECURITY_HIGH:
	case BT_SECURITY_FIPS:
		if (!encrypt)
			l2cap_chan_close(chan, ECONNREFUSED);
		break;
	default:
		break;
	}
}
7408
/* HCI callback: authentication/encryption status changed on the link.
 * For every channel on the connection, update the security level and
 * either resume traffic, continue an outgoing connect, or answer a
 * pending incoming connect (BT_CONNECT2) with success/pending/blocked.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		/* Skip channels that have their own request in flight */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status && l2cap_check_enc_key_size(hcon, chan))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon, chan)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Defer the decision to userspace */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On success immediately start configuration */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
7495
7496 /* Append fragment into frame respecting the maximum len of rx_skb */
static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
			   u16 len)
{
	if (!conn->rx_skb) {
		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			return -ENOMEM;
		/* Init rx_len */
		conn->rx_len = len;

		/* Carry the arrival timestamp of the first fragment over
		 * to the reassembled frame.
		 */
		skb_set_delivery_time(conn->rx_skb, skb->tstamp,
				      skb->tstamp_type);
	}

	/* Copy as much as the rx_skb can hold */
	len = min_t(u16, len, skb->len);
	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
	skb_pull(skb, len);
	conn->rx_len -= len;

	/* Number of bytes actually consumed from @skb */
	return len;
}
7520
/* Complete the 2-byte L2CAP length field of a frame whose start
 * fragment was too short to carry it, then size conn->rx_skb for the
 * full frame — reallocating when the initial conn->mtu-sized guess
 * lacks the tailroom.  Returns a negative errno on failure, otherwise
 * the number of bytes consumed.
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to received all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in order to
	 * fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}
7555
l2cap_recv_reset(struct l2cap_conn * conn)7556 static void l2cap_recv_reset(struct l2cap_conn *conn)
7557 {
7558 kfree_skb(conn->rx_skb);
7559 conn->rx_skb = NULL;
7560 conn->rx_len = 0;
7561 }
7562
l2cap_conn_hold_unless_zero(struct l2cap_conn * c)7563 struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *c)
7564 {
7565 if (!c)
7566 return NULL;
7567
7568 BT_DBG("conn %p orig refcnt %u", c, kref_read(&c->ref));
7569
7570 if (!kref_get_unless_zero(&c->ref))
7571 return NULL;
7572
7573 return c;
7574 }
7575
/* HCI entry point for inbound ACL data.  Reassembles L2CAP frames from
 * ACL start/continuation fragments in conn->rx_skb and hands each
 * complete frame to l2cap_recv_frame().  Holds a conn reference across
 * processing and always consumes @skb.
 */
int l2cap_recv_acldata(struct hci_dev *hdev, u16 handle,
		       struct sk_buff *skb, u16 flags)
{
	struct hci_conn *hcon;
	struct l2cap_conn *conn;
	int len;

	/* Lock hdev for hci_conn, and race on l2cap_data vs. l2cap_conn_del */
	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		kfree_skb(skb);
		return -ENOENT;
	}

	hci_conn_enter_active_mode(hcon, BT_POWER_FORCE_ACTIVE_OFF);

	conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	conn = l2cap_conn_hold_unless_zero(conn);
	/* hcon is only valid under the hdev lock; forget it now */
	hcon = NULL;

	hci_dev_unlock(hdev);

	if (!conn) {
		kfree_skb(skb);
		return -EINVAL;
	}

	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);

	mutex_lock(&conn->lock);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start frame while reassembling means we lost the
		 * tail of the previous frame.
		 */
		if (conn->rx_skb) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment may not contain the L2CAP length so just
		 * copy the initial byte when that happens and use conn->mtu as
		 * expected length.
		 */
		if (skb->len < L2CAP_LEN_SIZE) {
			l2cap_recv_frag(conn, skb, conn->mtu);
			break;
		}

		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			goto unlock;
		}

		BT_DBG("Start: total len %d, frag len %u", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %u, expected len %d)",
			       skb->len, len);
			/* PTS test cases L2CAP/COS/CED/BI-14-C and BI-15-C
			 * (Multiple Signaling Command in one PDU, Data
			 * Truncated, BR/EDR) send a C-frame to the IUT with
			 * PDU Length set to 8 and Channel ID set to the
			 * correct signaling channel for the logical link.
			 * The Information payload contains one L2CAP_ECHO_REQ
			 * packet with Data Length set to 0 with 0 octets of
			 * echo data and one invalid command packet due to
			 * data truncated in PDU but present in HCI packet.
			 *
			 * Shorter the socket buffer to the PDU length to
			 * allow to process valid commands from the PDU before
			 * setting the socket unreliable.
			 */
			skb->len = len;
			l2cap_recv_frame(conn, skb);
			l2cap_conn_unreliable(conn, ECOMM);
			goto unlock;
		}

		/* Append fragment into frame (with header) */
		if (l2cap_recv_frag(conn, skb, len) < 0)
			goto drop;

		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);

		if (!conn->rx_skb) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Complete the L2CAP length if it has not been read */
		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
			if (l2cap_recv_len(conn, skb) < 0) {
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}

			/* Header still could not be read just continue */
			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
				break;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %u, expected %u)",
			       skb->len, conn->rx_len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		l2cap_recv_frag(conn, skb, skb->len);

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
unlock:
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
	return 0;
}
7723
/* HCI callbacks: L2CAP registers for link setup/teardown and security
 * (encryption/authentication) change notifications.
 */
static struct hci_cb l2cap_cb = {
	.name = "L2CAP",
	.connect_cfm = l2cap_connect_cfm,
	.disconn_cfm = l2cap_disconn_cfm,
	.security_cfm = l2cap_security_cfm,
};
7730
/* debugfs "l2cap" file: dump one line per channel on the global list
 * (src/dst addresses and types, state, PSM, CIDs, MTUs, security level
 * and mode).
 */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct l2cap_chan *c;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
			   &c->src, c->src_type, &c->dst, c->dst_type,
			   c->state, __le16_to_cpu(c->psm),
			   c->scid, c->dcid, c->imtu, c->omtu,
			   c->sec_level, c->mode);
	}

	read_unlock(&chan_list_lock);

	return 0;
}
7749
7750 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
7751
7752 static struct dentry *l2cap_debugfs;
7753
l2cap_init(void)7754 int __init l2cap_init(void)
7755 {
7756 int err;
7757
7758 err = l2cap_init_sockets();
7759 if (err < 0)
7760 return err;
7761
7762 hci_register_cb(&l2cap_cb);
7763
7764 if (IS_ERR_OR_NULL(bt_debugfs))
7765 return 0;
7766
7767 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7768 NULL, &l2cap_debugfs_fops);
7769
7770 return 0;
7771 }
7772
/* Module teardown: remove the debugfs entry, detach from HCI and
 * unregister the socket protocol — the reverse of l2cap_init().
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
7779
7780 module_param(disable_ertm, bool, 0644);
7781 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
7782
7783 module_param(enable_ecred, bool, 0644);
7784 MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
7785