1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40
41 #include "smp.h"
42
43 #define LE_FLOWCTL_MAX_CREDITS 65535
44
45 bool disable_ertm;
46 bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED);
47
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49
50 static LIST_HEAD(chan_list);
51 static DEFINE_RWLOCK(chan_list_lock);
52
53 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
54 u8 code, u8 ident, u16 dlen, void *data);
55 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 void *data);
57 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
58 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59
60 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
61 struct sk_buff_head *skbs, u8 event);
62 static void l2cap_retrans_timeout(struct work_struct *work);
63 static void l2cap_monitor_timeout(struct work_struct *work);
64 static void l2cap_ack_timeout(struct work_struct *work);
65
/* Map an HCI link type / address type pair onto the BDADDR_* constants
 * used by the L2CAP address fields.
 */
static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
{
	if (link_type != LE_LINK)
		return BDADDR_BREDR;

	return bdaddr_type == ADDR_LE_DEV_PUBLIC ? BDADDR_LE_PUBLIC :
						   BDADDR_LE_RANDOM;
}
77
bdaddr_src_type(struct hci_conn * hcon)78 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
79 {
80 return bdaddr_type(hcon->type, hcon->src_type);
81 }
82
bdaddr_dst_type(struct hci_conn * hcon)83 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
84 {
85 return bdaddr_type(hcon->type, hcon->dst_type);
86 }
87
88 /* ---- L2CAP channels ---- */
89
/* Look up a channel on @conn by destination CID.  Caller must hold the
 * connection lock; no reference is taken.
 */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list)
		if (chan->dcid == cid)
			return chan;

	return NULL;
}
101
/* Look up a channel on @conn by source CID.  Caller must hold the
 * connection lock; no reference is taken.
 */
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list)
		if (chan->scid == cid)
			return chan;

	return NULL;
}
113
114 /* Find channel with given SCID.
115 * Returns a reference locked channel.
116 */
l2cap_get_chan_by_scid(struct l2cap_conn * conn,u16 cid)117 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
118 u16 cid)
119 {
120 struct l2cap_chan *c;
121
122 c = __l2cap_get_chan_by_scid(conn, cid);
123 if (c) {
124 /* Only lock if chan reference is not 0 */
125 c = l2cap_chan_hold_unless_zero(c);
126 if (c)
127 l2cap_chan_lock(c);
128 }
129
130 return c;
131 }
132
133 /* Find channel with given DCID.
134 * Returns a reference locked channel.
135 */
l2cap_get_chan_by_dcid(struct l2cap_conn * conn,u16 cid)136 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
137 u16 cid)
138 {
139 struct l2cap_chan *c;
140
141 c = __l2cap_get_chan_by_dcid(conn, cid);
142 if (c) {
143 /* Only lock if chan reference is not 0 */
144 c = l2cap_chan_hold_unless_zero(c);
145 if (c)
146 l2cap_chan_lock(c);
147 }
148
149 return c;
150 }
151
/* Look up a channel on @conn by pending signalling identifier.  Caller
 * must hold the connection lock; no reference is taken.
 */
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list)
		if (chan->ident == ident)
			return chan;

	return NULL;
}
163
/* Find a channel on the global list bound to @psm/@src, restricted to
 * the same transport class (BR/EDR vs LE) as @src_type.  Caller must
 * hold chan_list_lock.
 */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
						      u8 src_type)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &chan_list, global_l) {
		bool chan_bredr = chan->src_type == BDADDR_BREDR;

		/* BR/EDR sockets only match BR/EDR channels, LE only LE */
		if ((src_type == BDADDR_BREDR) != chan_bredr)
			continue;

		if (chan->sport == psm && !bacmp(&chan->src, src))
			return chan;
	}

	return NULL;
}
181
/* Bind @chan to @psm on address @src.  A zero @psm requests automatic
 * allocation from the dynamic range of the channel's transport.
 *
 * Returns 0 on success, -EADDRINUSE if the requested PSM is taken, or
 * -EINVAL if no dynamic PSM is free.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err = 0;

	write_lock(&chan_list_lock);

	if (psm) {
		/* Explicit PSM: fail if already bound on this address */
		if (__l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
			err = -EADDRINUSE;
		} else {
			chan->psm = psm;
			chan->sport = psm;
		}
	} else {
		u16 p, first, last, step;

		if (chan->src_type == BDADDR_BREDR) {
			/* BR/EDR PSMs must have an odd least significant
			 * octet, hence the stride of two.
			 */
			first = L2CAP_PSM_DYN_START;
			last = L2CAP_PSM_AUTO_END;
			step = 2;
		} else {
			first = L2CAP_PSM_LE_DYN_START;
			last = L2CAP_PSM_LE_DYN_END;
			step = 1;
		}

		err = -EINVAL;
		for (p = first; p <= last; p += step) {
			if (__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							chan->src_type))
				continue;

			chan->psm = cpu_to_le16(p);
			chan->sport = cpu_to_le16(p);
			err = 0;
			break;
		}
	}

	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
226
/* Bind @chan to the fixed CID @scid, switching it to the fixed-channel
 * type.  Always succeeds.
 */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;
	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
241
l2cap_alloc_cid(struct l2cap_conn * conn)242 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
243 {
244 u16 cid, dyn_end;
245
246 if (conn->hcon->type == LE_LINK)
247 dyn_end = L2CAP_CID_LE_DYN_END;
248 else
249 dyn_end = L2CAP_CID_DYN_END;
250
251 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
252 if (!__l2cap_get_chan_by_scid(conn, cid))
253 return cid;
254 }
255
256 return 0;
257 }
258
l2cap_state_change(struct l2cap_chan * chan,int state)259 static void l2cap_state_change(struct l2cap_chan *chan, int state)
260 {
261 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
262 state_to_string(state));
263
264 chan->state = state;
265 chan->ops->state_change(chan, state, 0);
266 }
267
l2cap_state_change_and_error(struct l2cap_chan * chan,int state,int err)268 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
269 int state, int err)
270 {
271 chan->state = state;
272 chan->ops->state_change(chan, chan->state, err);
273 }
274
l2cap_chan_set_err(struct l2cap_chan * chan,int err)275 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
276 {
277 chan->ops->state_change(chan, chan->state, err);
278 }
279
__set_retrans_timer(struct l2cap_chan * chan)280 static void __set_retrans_timer(struct l2cap_chan *chan)
281 {
282 if (!delayed_work_pending(&chan->monitor_timer) &&
283 chan->retrans_timeout) {
284 l2cap_set_timer(chan, &chan->retrans_timer,
285 msecs_to_jiffies(chan->retrans_timeout));
286 }
287 }
288
__set_monitor_timer(struct l2cap_chan * chan)289 static void __set_monitor_timer(struct l2cap_chan *chan)
290 {
291 __clear_retrans_timer(chan);
292 if (chan->monitor_timeout) {
293 l2cap_set_timer(chan, &chan->monitor_timer,
294 msecs_to_jiffies(chan->monitor_timeout));
295 }
296 }
297
/* Find the queued skb carrying ERTM tx sequence number @seq, or NULL. */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *cur;

	skb_queue_walk(head, cur)
		if (bt_cb(cur)->l2cap.txseq == seq)
			return cur;

	return NULL;
}
310
311 /* ---- L2CAP sequence number lists ---- */
312
313 /* For ERTM, ordered lists of sequence numbers must be tracked for
314 * SREJ requests that are received and for frames that are to be
315 * retransmitted. These seq_list functions implement a singly-linked
316 * list in an array, where membership in the list can also be checked
317 * in constant time. Items can also be added to the tail of the list
318 * and removed from the head in constant time, without further memory
319 * allocs or frees.
320 */
321
/* Allocate the backing array for a sequence list sized for @size
 * entries.  Returns 0 or -ENOMEM.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t n, i;

	/* Round up to a power of two so sequence numbers (which may be
	 * up to 14 bits) can be mapped into the smaller array with a
	 * simple mask.
	 */
	n = roundup_pow_of_two(size);

	seq_list->list = kmalloc_array(n, sizeof(u16), GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	seq_list->mask = n - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;

	for (i = 0; i < n; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
344
l2cap_seq_list_free(struct l2cap_seq_list * seq_list)345 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
346 {
347 kfree(seq_list->list);
348 }
349
l2cap_seq_list_contains(struct l2cap_seq_list * seq_list,u16 seq)350 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
351 u16 seq)
352 {
353 /* Constant-time check for list membership */
354 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
355 }
356
l2cap_seq_list_pop(struct l2cap_seq_list * seq_list)357 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
358 {
359 u16 seq = seq_list->head;
360 u16 mask = seq_list->mask;
361
362 seq_list->head = seq_list->list[seq & mask];
363 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
364
365 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
366 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
367 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
368 }
369
370 return seq;
371 }
372
l2cap_seq_list_clear(struct l2cap_seq_list * seq_list)373 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
374 {
375 u16 i;
376
377 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
378 return;
379
380 for (i = 0; i <= seq_list->mask; i++)
381 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
382
383 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
384 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
385 }
386
/* Append @seq to the tail of the list in constant time.  Appending a
 * sequence number that is already a member is a no-op.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;	/* first element */
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
404
/* Channel timer expiry: close the channel with an error code derived
 * from the state it timed out in.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	if (!conn)
		return;

	mutex_lock(&conn->lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	/* Timeouts while connected/configuring (or while connecting with
	 * real security requirements) are reported as refusals; anything
	 * else is a plain timeout.
	 */
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	/* Drop the reference taken when this work was scheduled */
	l2cap_chan_put(chan);

	mutex_unlock(&conn->lock);
}
440
/* Allocate and initialise a new L2CAP channel in BT_OPEN state and add
 * it to the global channel list.  Returns NULL on allocation failure.
 * The caller owns the initial kref (released via l2cap_chan_put()).
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	/* NOTE(review): allocated with GFP_ATOMIC — presumably because
	 * callers may be in non-sleepable context; confirm against the
	 * call sites.
	 */
	chan = kzalloc_obj(*chan, GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	/* Available receive buffer space is initially unknown */
	chan->rx_avail = -1;

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
480
l2cap_chan_destroy(struct kref * kref)481 static void l2cap_chan_destroy(struct kref *kref)
482 {
483 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
484
485 BT_DBG("chan %p", chan);
486
487 write_lock(&chan_list_lock);
488 list_del(&chan->global_l);
489 write_unlock(&chan_list_lock);
490
491 kfree(chan);
492 }
493
l2cap_chan_hold(struct l2cap_chan * c)494 void l2cap_chan_hold(struct l2cap_chan *c)
495 {
496 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
497
498 kref_get(&c->kref);
499 }
500 EXPORT_SYMBOL_GPL(l2cap_chan_hold);
501
l2cap_chan_hold_unless_zero(struct l2cap_chan * c)502 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
503 {
504 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
505
506 if (!kref_get_unless_zero(&c->kref))
507 return NULL;
508
509 return c;
510 }
511
l2cap_chan_put(struct l2cap_chan * c)512 void l2cap_chan_put(struct l2cap_chan *c)
513 {
514 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
515
516 kref_put(&c->kref, l2cap_chan_destroy);
517 }
518 EXPORT_SYMBOL_GPL(l2cap_chan_put);
519
l2cap_chan_set_defaults(struct l2cap_chan * chan)520 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
521 {
522 chan->fcs = L2CAP_FCS_CRC16;
523 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
524 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
525 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
526 chan->remote_max_tx = chan->max_tx;
527 chan->remote_tx_win = chan->tx_win;
528 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
529 chan->sec_level = BT_SECURITY_LOW;
530 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
531 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
532 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
533
534 chan->conf_state = 0;
535 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
536
537 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
538 }
539 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
540
l2cap_le_rx_credits(struct l2cap_chan * chan)541 static __u16 l2cap_le_rx_credits(struct l2cap_chan *chan)
542 {
543 size_t sdu_len = chan->sdu ? chan->sdu->len : 0;
544
545 if (chan->mps == 0)
546 return 0;
547
548 /* If we don't know the available space in the receiver buffer, give
549 * enough credits for a full packet.
550 */
551 if (chan->rx_avail == -1)
552 return (chan->imtu / chan->mps) + 1;
553
554 /* If we know how much space is available in the receive buffer, give
555 * out as many credits as would fill the buffer.
556 */
557 if (chan->rx_avail <= sdu_len)
558 return 0;
559
560 return DIV_ROUND_UP(chan->rx_avail - sdu_len, chan->mps);
561 }
562
/* Initialise LE credit-based flow control state for @chan. */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	/* Reset SDU reassembly state */
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	chan->rx_credits = l2cap_le_rx_credits(chan);

	skb_queue_head_init(&chan->tx_q);
}
575
/* Initialise enhanced-credit (ECRED) flow control for @chan. */
static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
{
	l2cap_le_flowctl_init(chan, tx_credits);

	/* L2CAP implementations shall support a minimum MPS of 64 octets */
	if (chan->mps >= L2CAP_ECRED_MIN_MPS)
		return;

	chan->mps = L2CAP_ECRED_MIN_MPS;
	chan->rx_credits = l2cap_le_rx_credits(chan);
}
586
/* Attach @chan to @conn: assign CIDs according to the channel type, set
 * flow-spec defaults, take a channel reference for the connection's
 * list and (usually) pin the underlying hci_conn.
 *
 * Caller must hold the connection lock (see l2cap_chan_add()).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	/* Default disconnect reason until the link tells us otherwise */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default best-effort flow specification */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference owned by conn->chan_l; dropped in l2cap_chan_del() */
	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	/* Append to the list since the order matters for ECRED */
	list_add_tail(&chan->list, &conn->chan_l);
}
639
l2cap_chan_add(struct l2cap_conn * conn,struct l2cap_chan * chan)640 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
641 {
642 mutex_lock(&conn->lock);
643 __l2cap_chan_add(conn, chan);
644 mutex_unlock(&conn->lock);
645 }
646
/* Detach @chan from its connection and tear it down, reporting @err to
 * the owner.  Releases the connection-list channel reference and, for
 * modes that queue data, purges any pending transmit/receive state.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference taken in __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);
	}

	/* Channel never completed configuration: no mode-specific state
	 * (queues, timers, seq lists) has been set up yet.
	 */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
704
/* Invoke @func on every channel of @conn whose ident matches @id.
 * Uses the _safe iterator so @func may remove the channel.
 */
static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
				 l2cap_chan_func_t func, void *data)
{
	struct l2cap_chan *chan, *tmp;

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list)
		if (chan->ident == id)
			func(chan, data);
}
715
/* Invoke @func on every channel of @conn.  Caller holds conn->lock. */
static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
			      void *data)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list)
		func(chan, data);
}
725
/* Run @func on every channel of @conn with the connection lock held.
 * A NULL @conn is a no-op.
 */
void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
		     void *data)
{
	if (!conn)
		return;

	mutex_lock(&conn->lock);
	__l2cap_chan_list(conn, func, data);
	mutex_unlock(&conn->lock);
}
EXPORT_SYMBOL_GPL(l2cap_chan_list);
738
/* Deferred work that copies the hci_conn's current destination address
 * into every channel on the connection.
 *
 * NOTE(review): presumably scheduled after the peer's identity address
 * has been resolved (id_addr_timer) — confirm against the scheduler of
 * this work.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_timer.work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
757
l2cap_chan_le_connect_reject(struct l2cap_chan * chan)758 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
759 {
760 struct l2cap_conn *conn = chan->conn;
761 struct l2cap_le_conn_rsp rsp;
762 u16 result;
763
764 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
765 result = L2CAP_CR_LE_AUTHORIZATION;
766 else
767 result = L2CAP_CR_LE_BAD_PSM;
768
769 l2cap_state_change(chan, BT_DISCONN);
770
771 rsp.dcid = cpu_to_le16(chan->scid);
772 rsp.mtu = cpu_to_le16(chan->imtu);
773 rsp.mps = cpu_to_le16(chan->mps);
774 rsp.credits = cpu_to_le16(chan->rx_credits);
775 rsp.result = cpu_to_le16(result);
776
777 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
778 &rsp);
779 }
780
l2cap_chan_ecred_connect_reject(struct l2cap_chan * chan)781 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
782 {
783 l2cap_state_change(chan, BT_DISCONN);
784
785 __l2cap_ecred_conn_rsp_defer(chan);
786 }
787
l2cap_chan_connect_reject(struct l2cap_chan * chan)788 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
789 {
790 struct l2cap_conn *conn = chan->conn;
791 struct l2cap_conn_rsp rsp;
792 u16 result;
793
794 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
795 result = L2CAP_CR_SEC_BLOCK;
796 else
797 result = L2CAP_CR_BAD_PSM;
798
799 l2cap_state_change(chan, BT_DISCONN);
800
801 rsp.scid = cpu_to_le16(chan->dcid);
802 rsp.dcid = cpu_to_le16(chan->scid);
803 rsp.result = cpu_to_le16(result);
804 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
805
806 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
807 }
808
/* Close @chan according to its current state: tear it down locally,
 * send a disconnect request, or reject a pending incoming connection.
 * Called with the channel locked.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Give the peer until the send timeout to answer
			 * the disconnect request.
			 */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					/* The ECRED response is deferred
					 * (see __l2cap_ecred_conn_rsp_defer),
					 * so the channel must not be deleted
					 * here.
					 */
					return;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
859
l2cap_get_auth_type(struct l2cap_chan * chan)860 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
861 {
862 switch (chan->chan_type) {
863 case L2CAP_CHAN_RAW:
864 switch (chan->sec_level) {
865 case BT_SECURITY_HIGH:
866 case BT_SECURITY_FIPS:
867 return HCI_AT_DEDICATED_BONDING_MITM;
868 case BT_SECURITY_MEDIUM:
869 return HCI_AT_DEDICATED_BONDING;
870 default:
871 return HCI_AT_NO_BONDING;
872 }
873 break;
874 case L2CAP_CHAN_CONN_LESS:
875 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
876 if (chan->sec_level == BT_SECURITY_LOW)
877 chan->sec_level = BT_SECURITY_SDP;
878 }
879 if (chan->sec_level == BT_SECURITY_HIGH ||
880 chan->sec_level == BT_SECURITY_FIPS)
881 return HCI_AT_NO_BONDING_MITM;
882 else
883 return HCI_AT_NO_BONDING;
884 break;
885 case L2CAP_CHAN_CONN_ORIENTED:
886 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
887 if (chan->sec_level == BT_SECURITY_LOW)
888 chan->sec_level = BT_SECURITY_SDP;
889
890 if (chan->sec_level == BT_SECURITY_HIGH ||
891 chan->sec_level == BT_SECURITY_FIPS)
892 return HCI_AT_NO_BONDING_MITM;
893 else
894 return HCI_AT_NO_BONDING;
895 }
896 fallthrough;
897
898 default:
899 switch (chan->sec_level) {
900 case BT_SECURITY_HIGH:
901 case BT_SECURITY_FIPS:
902 return HCI_AT_GENERAL_BONDING_MITM;
903 case BT_SECURITY_MEDIUM:
904 return HCI_AT_GENERAL_BONDING;
905 default:
906 return HCI_AT_NO_BONDING;
907 }
908 break;
909 }
910 }
911
912 /* Service level security */
l2cap_chan_check_security(struct l2cap_chan * chan,bool initiator)913 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
914 {
915 struct l2cap_conn *conn = chan->conn;
916 __u8 auth_type;
917
918 if (conn->hcon->type == LE_LINK)
919 return smp_conn_security(conn->hcon, chan->sec_level);
920
921 auth_type = l2cap_get_auth_type(chan);
922
923 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
924 initiator);
925 }
926
l2cap_get_ident(struct l2cap_conn * conn)927 static int l2cap_get_ident(struct l2cap_conn *conn)
928 {
929 /* LE link does not support tools like l2ping so use the full range */
930 if (conn->hcon->type == LE_LINK)
931 return ida_alloc_range(&conn->tx_ida, 1, 255, GFP_ATOMIC);
932
933 /* Get next available identificator.
934 * 1 - 128 are used by kernel.
935 * 129 - 199 are reserved.
936 * 200 - 254 are used by utilities like l2ping, etc.
937 */
938 return ida_alloc_range(&conn->tx_ida, 1, 128, GFP_ATOMIC);
939 }
940
/* Hand @skb to the ACL layer, dropping it if the underlying hci_conn
 * is no longer valid.
 */
static void l2cap_send_acl(struct l2cap_conn *conn, struct sk_buff *skb,
			   u8 flags)
{
	if (!hci_conn_valid(conn->hcon->hdev, conn->hcon)) {
		kfree_skb(skb);
		return;
	}

	hci_send_acl(conn->hchan, skb, flags);
}
950
/* Build and transmit a signalling command on @conn. */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb;
	u8 flags = ACL_START;

	BT_DBG("code 0x%2.2x", code);

	skb = l2cap_build_cmd(conn, code, ident, len, data);
	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;

	/* Signalling traffic always forces the link active at top prio */
	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	l2cap_send_acl(conn, skb, flags);
}
975
l2cap_do_send(struct l2cap_chan * chan,struct sk_buff * skb)976 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
977 {
978 struct hci_conn *hcon = chan->conn->hcon;
979 u16 flags;
980
981 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
982 skb->priority);
983
984 /* Use NO_FLUSH for LE links (where this is the only option) or
985 * if the BR/EDR link supports it and flushing has not been
986 * explicitly requested (through FLAG_FLUSHABLE).
987 */
988 if (hcon->type == LE_LINK ||
989 (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
990 lmp_no_flush_capable(hcon->hdev)))
991 flags = ACL_START_NO_FLUSH;
992 else
993 flags = ACL_START;
994
995 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
996 hci_send_acl(chan->conn->hchan, skb, flags);
997 }
998
/* Decode a 16-bit enhanced control field into @control. */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	/* Fields common to I- and S-frames */
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (!(enh & L2CAP_CTRL_FRAME_TYPE)) {
		/* I-Frame: carries SAR and tx sequence number */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	} else {
		/* S-Frame: carries poll bit and supervisory function */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	}
}
1022
/* Decode a 32-bit extended control field into @control. */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	/* Fields common to I- and S-frames */
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (!(ext & L2CAP_EXT_CTRL_FRAME_TYPE)) {
		/* I-Frame: carries SAR and tx sequence number */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	} else {
		/* S-Frame: carries poll bit and supervisory function */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	}
}
1046
__unpack_control(struct l2cap_chan * chan,struct sk_buff * skb)1047 static inline void __unpack_control(struct l2cap_chan *chan,
1048 struct sk_buff *skb)
1049 {
1050 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1051 __unpack_extended_control(get_unaligned_le32(skb->data),
1052 &bt_cb(skb)->l2cap);
1053 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1054 } else {
1055 __unpack_enhanced_control(get_unaligned_le16(skb->data),
1056 &bt_cb(skb)->l2cap);
1057 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1058 }
1059 }
1060
__pack_extended_control(struct l2cap_ctrl * control)1061 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1062 {
1063 u32 packed;
1064
1065 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1066 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1067
1068 if (control->sframe) {
1069 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1070 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1071 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1072 } else {
1073 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1074 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1075 }
1076
1077 return packed;
1078 }
1079
__pack_enhanced_control(struct l2cap_ctrl * control)1080 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1081 {
1082 u16 packed;
1083
1084 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1085 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1086
1087 if (control->sframe) {
1088 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1089 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1090 packed |= L2CAP_CTRL_FRAME_TYPE;
1091 } else {
1092 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1093 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1094 }
1095
1096 return packed;
1097 }
1098
__pack_control(struct l2cap_chan * chan,struct l2cap_ctrl * control,struct sk_buff * skb)1099 static inline void __pack_control(struct l2cap_chan *chan,
1100 struct l2cap_ctrl *control,
1101 struct sk_buff *skb)
1102 {
1103 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1104 put_unaligned_le32(__pack_extended_control(control),
1105 skb->data + L2CAP_HDR_SIZE);
1106 } else {
1107 put_unaligned_le16(__pack_enhanced_control(control),
1108 skb->data + L2CAP_HDR_SIZE);
1109 }
1110 }
1111
__ertm_hdr_size(struct l2cap_chan * chan)1112 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1113 {
1114 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1115 return L2CAP_EXT_HDR_SIZE;
1116 else
1117 return L2CAP_ENH_HDR_SIZE;
1118 }
1119
/* Allocate and build a complete S-frame PDU for an ERTM/streaming channel.
 *
 * The caller supplies the already-packed control field (enhanced or
 * extended, matching the channel's FLAG_EXT_CTRL); this adds the basic
 * L2CAP header and, when the channel uses CRC16, a trailing FCS computed
 * over everything written so far.
 *
 * Returns the new skb, or ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	/* PDU length field excludes the basic header itself */
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers header + control field */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	/* S-frames carry flow-control state; send at highest priority */
	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1152
/* Build and transmit a supervisory frame (RR/RNR/REJ/SREJ) described by
 * *control. May modify *control: a pending F-bit is piggybacked onto any
 * non-poll S-frame. Also updates channel ack/RNR bookkeeping before the
 * frame is actually sent.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A queued F-bit response rides on the next non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	/* Track whether our last flow-control state sent was RNR */
	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Any frame except SREJ acknowledges up to reqseq, so the ack
	 * timer can be cancelled.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1190
/* Send a Receiver-Ready or Receiver-Not-Ready S-frame acknowledging
 * everything up to buffer_seq. RNR is chosen while the local side is
 * busy; @poll requests a response (P-bit).
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl ctrl;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.sframe = 1;
	ctrl.poll = poll;
	ctrl.reqseq = chan->buffer_seq;
	ctrl.super = test_bit(CONN_LOCAL_BUSY, &chan->conn_state) ?
		     L2CAP_SUPER_RNR : L2CAP_SUPER_RR;

	l2cap_send_sframe(chan, &ctrl);
}
1209
__l2cap_no_conn_pending(struct l2cap_chan * chan)1210 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1211 {
1212 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1213 return true;
1214
1215 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1216 }
1217
/* Send a classic (BR/EDR) L2CAP Connection Request for this channel.
 * Allocates a fresh command ident (stored in chan->ident so the response
 * can be matched) and marks the connect as pending via CONF_CONNECT_PEND.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1232
/* Transition a channel to BT_CONNECTED and notify its owner via the
 * ops->ready() callback. Idempotent: does nothing if the channel is
 * already connected.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Credit-based modes cannot transmit without credits;
		 * suspend the socket until the peer grants some.
		 */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);
		break;
	}

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1258
/* Send an LE Credit Based Connection Request for this channel.
 * Guarded by FLAG_LE_CONN_REQ_SENT so the request is only ever sent once;
 * initializes the LE flow-control state (MPS, rx credits) first.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Default the MTU to the link MTU if the owner didn't set one */
	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	l2cap_le_flowctl_init(chan, 0);

	memset(&req, 0, sizeof(req));
	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	/* Remember the ident to match the connection response */
	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1284
/* Working state for building a single Enhanced Credit Based Connection
 * Request that can carry up to five source CIDs at once.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req_hdr req;
		__le16 scid[5];		/* spec allows at most 5 CIDs per request */
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel initiating the request */
	struct pid *pid;		/* owner PID; only same-PID chans are merged */
	int count;			/* number of scid[] entries filled so far */
};
1294
l2cap_ecred_defer_connect(struct l2cap_chan * chan,void * data)1295 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1296 {
1297 struct l2cap_ecred_conn_data *conn = data;
1298 struct pid *pid;
1299
1300 if (chan == conn->chan)
1301 return;
1302
1303 if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1304 return;
1305
1306 pid = chan->ops->get_peer_pid(chan);
1307
1308 /* Only add deferred channels with the same PID/PSM */
1309 if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1310 chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1311 return;
1312
1313 if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1314 return;
1315
1316 l2cap_ecred_init(chan, 0);
1317
1318 /* Set the same ident so we can match on the rsp */
1319 chan->ident = conn->chan->ident;
1320
1321 /* Include all channels deferred */
1322 conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
1323
1324 conn->count++;
1325 }
1326
/* Send an Enhanced Credit Based Connection Request for this channel,
 * batching in any other deferred channels with the same PID/PSM (via
 * l2cap_ecred_defer_connect) so several channels can be opened with a
 * single request. Guarded by FLAG_ECRED_CONN_REQ_SENT.
 */
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_conn_data data;

	/* Deferred channels are picked up later by the initiating channel */
	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	memset(&data, 0, sizeof(data));
	data.pdu.req.psm     = chan->psm;
	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.req.mps     = cpu_to_le16(chan->mps);
	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.scid[0]     = cpu_to_le16(chan->scid);

	/* One ident shared by all channels folded into this request */
	chan->ident = l2cap_get_ident(conn);

	data.count = 1;
	data.chan = chan;
	data.pid = chan->ops->get_peer_pid(chan);

	/* Append matching deferred channels to data.pdu.scid[] */
	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);

	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
		       &data.pdu);
}
1359
l2cap_le_start(struct l2cap_chan * chan)1360 static void l2cap_le_start(struct l2cap_chan *chan)
1361 {
1362 struct l2cap_conn *conn = chan->conn;
1363
1364 if (!smp_conn_security(conn->hcon, chan->sec_level))
1365 return;
1366
1367 if (!chan->psm) {
1368 l2cap_chan_ready(chan);
1369 return;
1370 }
1371
1372 if (chan->state == BT_CONNECT) {
1373 if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1374 l2cap_ecred_connect(chan);
1375 else
1376 l2cap_le_connect(chan);
1377 }
1378 }
1379
l2cap_start_connection(struct l2cap_chan * chan)1380 static void l2cap_start_connection(struct l2cap_chan *chan)
1381 {
1382 if (chan->conn->hcon->type == LE_LINK) {
1383 l2cap_le_start(chan);
1384 } else {
1385 l2cap_send_conn_req(chan);
1386 }
1387 }
1388
/* Send the L2CAP Information Request (feature mask) for a BR/EDR
 * connection, once per connection. Arms the info timer so the procedure
 * completes even if the peer never responds.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	/* Only ever sent once per connection */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	/* Fallback in case the response never arrives */
	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1406
l2cap_check_enc_key_size(struct hci_conn * hcon,struct l2cap_chan * chan)1407 static bool l2cap_check_enc_key_size(struct hci_conn *hcon,
1408 struct l2cap_chan *chan)
1409 {
1410 /* The minimum encryption key size needs to be enforced by the
1411 * host stack before establishing any L2CAP connections. The
1412 * specification in theory allows a minimum of 1, but to align
1413 * BR/EDR and LE transports, a minimum of 7 is chosen.
1414 *
1415 * This check might also be called for unencrypted connections
1416 * that have no key size requirements. Ensure that the link is
1417 * actually encrypted before enforcing a key size.
1418 */
1419 int min_key_size = hcon->hdev->min_enc_key_size;
1420
1421 /* On FIPS security level, key size must be 16 bytes */
1422 if (chan->sec_level == BT_SECURITY_FIPS)
1423 min_key_size = 16;
1424
1425 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1426 hcon->enc_key_size >= min_key_size);
1427 }
1428
/* Drive channel establishment as far as current connection state allows:
 * LE channels are handed to the LE path; BR/EDR channels first wait for
 * the info req/rsp exchange, then for security, then either start the
 * connection or (on key-size failure) arm the disconnect timer.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* The info exchange must finish first; its completion handler
	 * restarts pending channels.
	 */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	if (l2cap_check_enc_key_size(conn->hcon, chan))
		l2cap_start_connection(chan);
	else
		/* Weak key: give up and let the disc timer tear down */
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
1455
/* Non-zero when @mode (ERTM or streaming) is supported by both the peer's
 * advertised feature mask and our own local feature mask; basic and other
 * modes return 0 here.
 */
static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
{
	u32 local = l2cap_feat_mask;

	if (!disable_ertm)
		local |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;

	switch (mode) {
	case L2CAP_MODE_ERTM:
		return feat_mask & local & L2CAP_FEAT_ERTM;
	case L2CAP_MODE_STREAMING:
		return feat_mask & local & L2CAP_FEAT_STREAMING;
	default:
		return 0x00;
	}
}
1471
/* Send an L2CAP Disconnection Request for this channel and move it to
 * BT_DISCONN with @err recorded for the owner. ERTM timers are stopped
 * first so no retransmission fires during teardown.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* Stop ERTM machinery before tearing the channel down */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1493
1494 /* ---- L2CAP connections ---- */
/* ---- L2CAP connections ---- */

/* Walk every channel on the connection and push its state machine
 * forward, typically after the info req/rsp exchange or a security
 * change. Connectionless channels become ready immediately; BT_CONNECT
 * channels are (re)started or closed; BT_CONNECT2 channels get their
 * pending Connection Response sent.
 *
 * Each channel is locked individually; the safe iterator tolerates
 * channels removed by l2cap_chan_close() during the walk.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* State-2 devices cannot fall back to basic mode:
			 * close if the negotiated mode is unsupported.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon, chan))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connect awaiting our response */
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Owner must authorize first */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Start configuration only after a successful,
			 * not-yet-configured accept.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}
}
1570
/* Post-connect processing specific to LE links: kick off pairing for
 * outgoing connections and, as peripheral, request a connection
 * parameter update if the current interval is outside the configured
 * bounds.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1603
/* Called when the underlying HCI link becomes usable: start the BR/EDR
 * info exchange, advance every existing channel, run LE-specific setup,
 * and flush any RX frames that arrived before the connection was ready.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed/connectionless channels wait for the
			 * feature mask before becoming ready.
			 */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process frames queued before the connection became ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1639
1640 /* Notify sockets that we cannot guaranty reliability anymore */
l2cap_conn_unreliable(struct l2cap_conn * conn,int err)1641 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1642 {
1643 struct l2cap_chan *chan;
1644
1645 BT_DBG("conn %p", conn);
1646
1647 list_for_each_entry(chan, &conn->chan_l, list) {
1648 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1649 l2cap_chan_set_err(chan, err);
1650 }
1651 }
1652
/* Info-request timer expiry: treat the feature-mask exchange as done
 * (with no response received) and restart the channels that were
 * waiting on it.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	mutex_lock(&conn->lock);
	l2cap_conn_start(conn);
	mutex_unlock(&conn->lock);
}
1665
1666 /*
1667 * l2cap_user
1668 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1669 * callback is called during registration. The ->remove callback is called
1670 * during unregistration.
1671 * An l2cap_user object can either be explicitly unregistered or when the
1672 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1673 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1674 * External modules must own a reference to the l2cap_conn object if they intend
1675 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1676 * any time if they don't.
1677 */
1678
/* Register an external l2cap_user on this connection.
 *
 * Returns 0 on success, -EINVAL if the user object is already linked
 * somewhere, -ENODEV if the connection has been torn down, or the
 * error returned by the user's ->probe() callback.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects under conn->lock, and we use the same lock here
	 * to protect access to conn->users and conn->hchan.
	 */

	mutex_lock(&conn->lock);

	/* A non-empty list node means the user is already registered */
	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	mutex_unlock(&conn->lock);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1714
l2cap_unregister_user(struct l2cap_conn * conn,struct l2cap_user * user)1715 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1716 {
1717 mutex_lock(&conn->lock);
1718
1719 if (list_empty(&user->list))
1720 goto out_unlock;
1721
1722 list_del_init(&user->list);
1723 user->remove(conn, user);
1724
1725 out_unlock:
1726 mutex_unlock(&conn->lock);
1727 }
1728 EXPORT_SYMBOL(l2cap_unregister_user);
1729
l2cap_unregister_all_users(struct l2cap_conn * conn)1730 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1731 {
1732 struct l2cap_user *user;
1733
1734 while (!list_empty(&conn->users)) {
1735 user = list_first_entry(&conn->users, struct l2cap_user, list);
1736 list_del_init(&user->list);
1737 user->remove(conn, user);
1738 }
1739 }
1740
/* Tear down the L2CAP connection attached to @hcon: stop deferred work,
 * unregister users, close every channel with @err, detach from the HCI
 * channel, and drop the connection reference. The teardown order is
 * deliberate — RX processing is stopped before channels are killed.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	mutex_lock(&conn->lock);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	ida_destroy(&conn->tx_ida);

	cancel_delayed_work_sync(&conn->id_addr_timer);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold across del/close so the chan survives removal
		 * from the list until we're done with it.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hci_chan_del(conn->hchan);
	/* NULL hchan marks the conn as unregistered for l2cap_register_user */
	conn->hchan = NULL;

	hcon->l2cap_data = NULL;
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
}
1796
/* kref release callback: free the connection and drop its HCI
 * connection reference. Invoked when the last l2cap_conn_put() runs.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1804
/* Take a reference on the connection; returns @conn for call chaining. */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
1811
/* Drop a connection reference; frees the conn when the count hits zero. */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1817
1818 /* ---- Socket interface ---- */
1819
1820 /* Find socket with psm and source / destination bdaddr.
1821 * Returns closest match.
1822 */
/* Find socket with psm and source / destination bdaddr.
 * Returns closest match.
 *
 * An exact src+dst match wins immediately; otherwise the best wildcard
 * (BDADDR_ANY) match found during the walk is returned. The returned
 * channel carries a reference taken via l2cap_chan_hold_unless_zero(),
 * so a channel already being destroyed is never returned; the caller
 * must put the reference. @state of 0 matches any state.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* Transport must match the link type being looked up */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1873
l2cap_monitor_timeout(struct work_struct * work)1874 static void l2cap_monitor_timeout(struct work_struct *work)
1875 {
1876 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1877 monitor_timer.work);
1878
1879 BT_DBG("chan %p", chan);
1880
1881 l2cap_chan_lock(chan);
1882
1883 if (!chan->conn) {
1884 l2cap_chan_unlock(chan);
1885 l2cap_chan_put(chan);
1886 return;
1887 }
1888
1889 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1890
1891 l2cap_chan_unlock(chan);
1892 l2cap_chan_put(chan);
1893 }
1894
l2cap_retrans_timeout(struct work_struct * work)1895 static void l2cap_retrans_timeout(struct work_struct *work)
1896 {
1897 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1898 retrans_timer.work);
1899
1900 BT_DBG("chan %p", chan);
1901
1902 l2cap_chan_lock(chan);
1903
1904 if (!chan->conn) {
1905 l2cap_chan_unlock(chan);
1906 l2cap_chan_put(chan);
1907 return;
1908 }
1909
1910 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1911 l2cap_chan_unlock(chan);
1912 l2cap_chan_put(chan);
1913 }
1914
/* Transmit a batch of I-frames in streaming mode: number each frame with
 * the next tx sequence, pack the control field, append FCS when enabled,
 * and send. Streaming mode has no retransmission, so frames are consumed
 * from the queue as they go out.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Streaming mode never acknowledges, so reqseq is 0 */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1950
/* Transmit as many queued I-frames as the ERTM TX window and state allow.
 *
 * Each frame is numbered, packed (piggybacking an ack up to buffer_seq
 * and a pending F-bit), FCS'd when enabled, and sent as a clone — the
 * original stays on tx_q for possible retransmission.
 *
 * Returns the number of frames sent, 0 if the remote is busy, or
 * -ENOTCONN if the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Piggyback a pending F-bit response */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* I-frames acknowledge everything up to buffer_seq */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; the frame itself stays queued for
		 * retransmission until acknowledged.
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2017
/* Retransmit every sequence number queued on chan->retrans_list.
 *
 * Each frame's stored control field is re-used but refreshed with the
 * current reqseq (and possibly the F-bit) before being rewritten in the
 * skb; the FCS is then recomputed. The retry counter enforces max_tx —
 * exceeding it disconnects the channel.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		/* Work on a copy of the stored control info */
		control = bt_cb(skb)->l2cap;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the ack and F-bit for this (re)transmission */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
					   L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2095
/* Retransmit the single frame named by control->reqseq (SREJ response). */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2104
/* Retransmit every unacked frame starting at control->reqseq (REJ
 * response). A P-bit in *control queues an F-bit for the next frame.
 * Does nothing while the remote is busy.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Rebuild the retransmit list from scratch */
	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame at/after reqseq (or the send head) */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything up to (not including) the send head */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2138
/* Send an acknowledgment S-frame if one is due.
 *
 * While locally busy (and still in the plain RECV RX state) an RNR
 * carrying the current buffer_seq is sent immediately. Otherwise
 * pending I-frames are flushed first, since they piggyback the ack;
 * an explicit RR is emitted only once the count of unacked received
 * frames reaches 3/4 of the ack window, else the ack timer is armed
 * to acknowledge later.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Tell the remote we cannot accept more I-frames */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Not enough pending yet; defer the ack via the timer */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2188
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes into the head skb, the remainder into continuation fragments
 * chained on skb_shinfo(skb)->frag_list (each at most conn->mtu bytes,
 * carrying no L2CAP header).
 *
 * Returns the number of bytes consumed or a negative errno. On
 * failure any fragments already attached stay linked to @skb, so the
 * caller's kfree_skb(skb) releases the whole chain.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
					 &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len -= count;

		/* Keep the head skb's length accounting in sync with
		 * the fragment chain.
		 */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2232
/* Build a single connectionless PDU: basic L2CAP header, the 2-byte
 * PSM, then @len bytes of payload copied from @msg. Returns the skb
 * or an ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	int hdr_len = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;
	struct sk_buff *skb;
	int frag_len, ret;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* Payload bytes that fit alongside the headers in one HCI MTU */
	frag_len = min_t(unsigned int, (conn->mtu - hdr_len), len);

	skb = chan->ops->alloc_skb(chan, hdr_len, frag_len,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Basic L2CAP header followed by the PSM field */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	ret = l2cap_skbuff_fromiovec(chan, msg, len, frag_len, skb);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	return skb;
}
2264
/* Build a basic-mode PDU: plain L2CAP header (no control or FCS
 * fields) followed by @len bytes of payload copied from @msg.
 * Returns the skb or an ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_hdr *lh;
	struct sk_buff *skb;
	int frag_len, ret;

	BT_DBG("chan %p len %zu", chan, len);

	/* Payload bytes that fit alongside the header in one HCI MTU */
	frag_len = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, frag_len,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	ret = l2cap_skbuff_fromiovec(chan, msg, len, frag_len, skb);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	return skb;
}
2294
/* Build one ERTM/streaming I-frame PDU.
 *
 * Layout: L2CAP header, enhanced (2-byte) or extended (4-byte) control
 * field, optional 2-byte SDU length (@sdulen != 0 marks the first PDU
 * of a segmented SDU), then @len payload bytes from @msg. The control
 * field is zeroed here and filled in at transmit time; room for the
 * trailing FCS is reserved via hlen when CRC16 is in use.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Remember per-frame FCS mode and reset the retry counter */
	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2348
/* Segment an outgoing SDU into ERTM/streaming I-frame PDUs queued on
 * @seg_queue.
 *
 * The per-PDU payload budget starts from the HCI MTU, is capped at
 * the BR/EDR maximum, shrinks by the worst-case L2CAP overhead (FCS
 * and ERTM header), and is finally limited by the remote MPS. SAR
 * marking: UNSEGMENTED for a single-PDU SDU, else START (carrying the
 * SDU length) / CONTINUE / END. Returns 0 or a negative errno, in
 * which case @seg_queue has been purged.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			/* Drop any PDUs built so far */
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the first (START) PDU carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2414
/* Build one LE flow-control (credit-based) PDU: L2CAP header, an
 * optional 2-byte SDU length (@sdulen != 0 marks the first PDU of the
 * SDU), then @len payload bytes from @msg. Returns the skb or an
 * ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_hdr *lh;
	struct sk_buff *skb;
	int hdr_len, frag_len, ret;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Only the first PDU of an SDU carries the SDU length field */
	hdr_len = L2CAP_HDR_SIZE;
	if (sdulen)
		hdr_len += L2CAP_SDULEN_SIZE;

	frag_len = min_t(unsigned int, (conn->mtu - hdr_len), len);

	skb = chan->ops->alloc_skb(chan, hdr_len, frag_len,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hdr_len - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	ret = l2cap_skbuff_fromiovec(chan, msg, len, frag_len, skb);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	return skb;
}
2457
/* Segment an outgoing SDU into LE flow-control PDUs queued on
 * @seg_queue.
 *
 * The first PDU carries the total SDU length, which costs
 * L2CAP_SDULEN_SIZE bytes of its payload budget; subsequent PDUs get
 * that space back. Returns 0 or a negative errno, in which case
 * @seg_queue has been purged.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	/* First PDU budget is reduced by the SDU length field */
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		/* After the first PDU: no SDU length field, so the
		 * remaining PDUs can carry MPS bytes of payload.
		 */
		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2493
l2cap_le_flowctl_send(struct l2cap_chan * chan)2494 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2495 {
2496 int sent = 0;
2497
2498 BT_DBG("chan %p", chan);
2499
2500 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2501 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2502 chan->tx_credits--;
2503 sent++;
2504 }
2505
2506 BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2507 skb_queue_len(&chan->tx_q));
2508 }
2509
/* Attach TX timestamping state to @skb. Stream sockets account the
 * full @len; all other socket types account a single unit.
 */
static void l2cap_tx_timestamp(struct sk_buff *skb,
			       const struct sockcm_cookie *sockc,
			       size_t len)
{
	struct sock *sk = skb ? skb->sk : NULL;
	bool stream = sk && sk->sk_type == SOCK_STREAM;

	hci_setup_tx_timestamp(skb, stream ? len : 1, sockc);
}
2521
/* Timestamp a segmented SDU. For stream sockets the mark goes on the
 * last segment (completion of the whole SDU); otherwise on the first.
 */
static void l2cap_tx_timestamp_seg(struct sk_buff_head *queue,
				   const struct sockcm_cookie *sockc,
				   size_t len)
{
	struct sk_buff *first = skb_peek(queue);
	struct sock *sk = first ? first->sk : NULL;
	struct sk_buff *target;

	if (sk && sk->sk_type == SOCK_STREAM)
		target = skb_peek_tail(queue);
	else
		target = first;

	l2cap_tx_timestamp(target, sockc, len);
}
2534
/* Send an SDU on @chan, dispatching on channel type and mode.
 *
 * Connectionless channels send a single G-frame. LE/extended
 * flow-control modes segment the SDU, queue it, and transmit as
 * credits allow (suspending the channel when credits run out). Basic
 * mode sends one B-frame. ERTM/streaming segment first (allocation
 * may block) and then hand the PDUs to the TX state machine or the
 * streaming sender.
 *
 * Returns the number of bytes accepted (len) or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    const struct sockcm_cookie *sockc)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_tx_timestamp(skb, sockc, len);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have gone down while segmenting; drop
		 * anything that was built.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		l2cap_tx_timestamp_seg(&seg_queue, sockc, len);

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: back-pressure the caller until the
		 * peer grants more.
		 */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_tx_timestamp(skb, sockc, len);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM) {
			/* TODO: ERTM mode timestamping */
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		} else {
			l2cap_tx_timestamp_seg(&seg_queue, sockc, len);
			l2cap_streaming_send(chan, &seg_queue);
		}

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		/* NOTE(review): message says "state" but prints chan->mode */
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2648
/* Send an SREJ S-frame for every sequence number between
 * expected_tx_seq and @txseq that has not already been buffered in
 * srej_q, recording each in srej_list, then advance expected_tx_seq
 * past @txseq.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Skip frames already received out of order */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2671
l2cap_send_srej_tail(struct l2cap_chan * chan)2672 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2673 {
2674 struct l2cap_ctrl control;
2675
2676 BT_DBG("chan %p", chan);
2677
2678 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2679 return;
2680
2681 memset(&control, 0, sizeof(control));
2682 control.sframe = 1;
2683 control.super = L2CAP_SUPER_SREJ;
2684 control.reqseq = chan->srej_list.tail;
2685 l2cap_send_sframe(chan, &control);
2686 }
2687
/* Re-send SREJs for every outstanding entry of the SREJ list except
 * @txseq. Each entry is popped, re-requested, and appended back, so
 * the initial head is captured to guarantee exactly one pass through
 * the rotating list.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Keep the entry pending until the frame arrives */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2713
/* Process an incoming acknowledgment: release every TX-queue frame
 * from expected_ack_seq up to (but excluding) @reqseq, update the
 * unacked count, and stop the retransmission timer once nothing is
 * outstanding.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack adds no new information */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2745
/* Abandon SREJ-based recovery: rewind expected_tx_seq to the last
 * delivered frame, discard the SREJ bookkeeping and any out-of-order
 * buffered frames, and return the RX state machine to plain RECV.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2755
/* ERTM TX state machine handler for the XMIT state (normal
 * transmission; no poll outstanding).
 *
 * Data requests are queued and sent immediately; local-busy
 * transitions emit RNR/RR acks; explicit polls and retransmission
 * timeouts send a poll and move the channel to WAIT_F.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* We sent RNR earlier; poll the remote with RR
			 * so it knows we can receive again.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2827
/* ERTM TX state machine handler for the WAIT_F state (a poll has been
 * sent and we are waiting for a frame carrying the F-bit).
 *
 * New data is queued but not transmitted; receipt of the final bit
 * clears the monitor timer and returns the channel to XMIT; monitor
 * timeouts re-poll until max_tx is exhausted, after which the channel
 * is disconnected.
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* Answer the earlier RNR with an RR poll so the
			 * remote learns we can receive again.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		fallthrough;

	case L2CAP_EV_RECV_FBIT:
		if (control && control->final) {
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			/* Format fixed: was the malformed "0x2.2%x" */
			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
		}
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_EV_MONITOR_TO:
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
			l2cap_send_disconn_req(chan, ECONNABORTED);
		}
		break;
	default:
		break;
	}
}
2904
/* Feed an event into the ERTM TX state machine, dispatching on the
 * channel's current TX state. Events arriving in any other state are
 * silently ignored.
 */
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
	       chan, control, skbs, event, chan->tx_state);

	if (chan->tx_state == L2CAP_TX_STATE_XMIT)
		l2cap_tx_state_xmit(chan, control, skbs, event);
	else if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
		l2cap_tx_state_wait_f(chan, control, skbs, event);
}
2923
/* Forward a received reqseq+F-bit acknowledgment to the TX state
 * machine.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2930
/* Forward only the F-bit of a received frame to the TX state machine
 * (no reqseq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2937
2938 /* Copy frame to all raw sockets on that connection */
l2cap_raw_recv(struct l2cap_conn * conn,struct sk_buff * skb)2939 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2940 {
2941 struct sk_buff *nskb;
2942 struct l2cap_chan *chan;
2943
2944 BT_DBG("conn %p", conn);
2945
2946 list_for_each_entry(chan, &conn->chan_l, list) {
2947 if (chan->chan_type != L2CAP_CHAN_RAW)
2948 continue;
2949
2950 /* Don't send frame to the channel it came from */
2951 if (bt_cb(skb)->l2cap.chan == chan)
2952 continue;
2953
2954 nskb = skb_clone(skb, GFP_KERNEL);
2955 if (!nskb)
2956 continue;
2957 if (chan->ops->recv(chan, nskb))
2958 kfree_skb(nskb);
2959 }
2960 }
2961
2962 /* ---- L2CAP signalling commands ---- */
l2cap_build_cmd(struct l2cap_conn * conn,u8 code,u8 ident,u16 dlen,void * data)2963 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2964 u8 ident, u16 dlen, void *data)
2965 {
2966 struct sk_buff *skb, **frag;
2967 struct l2cap_cmd_hdr *cmd;
2968 struct l2cap_hdr *lh;
2969 int len, count;
2970
2971 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2972 conn, code, ident, dlen);
2973
2974 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2975 return NULL;
2976
2977 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2978 count = min_t(unsigned int, conn->mtu, len);
2979
2980 skb = bt_skb_alloc(count, GFP_KERNEL);
2981 if (!skb)
2982 return NULL;
2983
2984 lh = skb_put(skb, L2CAP_HDR_SIZE);
2985 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2986
2987 if (conn->hcon->type == LE_LINK)
2988 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2989 else
2990 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2991
2992 cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
2993 cmd->code = code;
2994 cmd->ident = ident;
2995 cmd->len = cpu_to_le16(dlen);
2996
2997 if (dlen) {
2998 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2999 skb_put_data(skb, data, count);
3000 data += count;
3001 }
3002
3003 len -= skb->len;
3004
3005 /* Continuation fragments (no L2CAP header) */
3006 frag = &skb_shinfo(skb)->frag_list;
3007 while (len) {
3008 count = min_t(unsigned int, conn->mtu, len);
3009
3010 *frag = bt_skb_alloc(count, GFP_KERNEL);
3011 if (!*frag)
3012 goto fail;
3013
3014 skb_put_data(*frag, data, count);
3015
3016 len -= count;
3017 data += count;
3018
3019 frag = &(*frag)->next;
3020 }
3021
3022 return skb;
3023
3024 fail:
3025 kfree_skb(skb);
3026 return NULL;
3027 }
3028
/* Parse one configuration option at *@ptr, advancing *@ptr past it.
 *
 * Writes the option type and length to @type/@olen. 1/2/4-byte values
 * are decoded (little-endian) into *@val; any other length leaves
 * *@val pointing at the raw option bytes. Returns the total number of
 * bytes consumed.
 *
 * NOTE(review): opt->len comes from the wire and is not bounds-checked
 * against the remaining buffer here — callers must validate total
 * option-list length before advancing; confirm at call sites.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer instead */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3062
/* Append one configuration option at *@ptr, advancing *@ptr past it.
 *
 * 1/2/4-byte values are stored little-endian; any other length treats
 * @val as a pointer to @len raw bytes. Options that would overflow the
 * remaining @size bytes are silently dropped (the pointer does not
 * advance).
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	/* Refuse to write past the end of the response buffer */
	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3095
/* Append an Extended Flow Specification option for ERTM or streaming
 * channels; other modes add nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
	struct l2cap_conf_efs efs;

	if (chan->mode == L2CAP_MODE_ERTM) {
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
	} else if (chan->mode == L2CAP_MODE_STREAMING) {
		/* Streaming always advertises best-effort service */
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
	} else {
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs, size);
}
3126
/* Deferred-ack timer callback: if any received frames are still
 * unacknowledged, send an RR/RNR now.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	/* Drops the reference held for the pending work — presumably
	 * taken when the ack timer was armed; verify in __set_ack_timer.
	 */
	l2cap_chan_put(chan);
}
3146
/* Reset per-channel transfer state and, for ERTM mode, allocate the
 * SREJ and retransmission sequence lists.
 *
 * Returns 0 on success or a negative errno from sequence-list
 * allocation (in which case the already-allocated srej_list is freed).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	/* Reset all sequence counters and reassembly state */
	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Streaming mode needs none of the ERTM machinery below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3182
/* Keep ERTM or streaming mode only when the remote's feature mask
 * supports it; everything else falls back to basic mode.
 */
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
{
	bool needs_support = (mode == L2CAP_MODE_STREAMING ||
			      mode == L2CAP_MODE_ERTM);

	if (needs_support && l2cap_mode_supported(mode, remote_feat_mask))
		return mode;

	return L2CAP_MODE_BASIC;
}
3195
/* Connection feature mask advertises extended window size */
static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
{
	return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW);
}
3200
/* Connection feature mask advertises extended flow specification */
static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
{
	return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW);
}
3205
/* Fill the RFC option's retransmission and monitor timeouts with the
 * L2CAP default values (little-endian on the wire).
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
}
3212
l2cap_txwin_setup(struct l2cap_chan * chan)3213 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3214 {
3215 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3216 __l2cap_ews_supported(chan->conn)) {
3217 /* use extended control field */
3218 set_bit(FLAG_EXT_CTRL, &chan->flags);
3219 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3220 } else {
3221 chan->tx_win = min_t(u16, chan->tx_win,
3222 L2CAP_DEFAULT_TX_WINDOW);
3223 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3224 }
3225 chan->ack_win = chan->tx_win;
3226 }
3227
/* Auto-select the incoming MTU from the connection's packet types.
 *
 * Each check overwrites imtu with a larger value, and the checks run
 * from smallest to largest packet, so the final imtu corresponds to
 * the biggest packet the test sequence selects (values are payload
 * capacity minus the 2-byte payload header).
 */
static void l2cap_mtu_auto(struct l2cap_chan *chan)
{
	struct hci_conn *conn = chan->conn->hcon;

	chan->imtu = L2CAP_DEFAULT_MIN_MTU;

	/* The 2-DH1 packet has between 2 and 56 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_2DH1))
		chan->imtu = 54;

	/* The 3-DH1 packet has between 2 and 85 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_3DH1))
		chan->imtu = 83;

	/* The 2-DH3 packet has between 2 and 369 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_2DH3))
		chan->imtu = 367;

	/* The 3-DH3 packet has between 2 and 554 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_3DH3))
		chan->imtu = 552;

	/* The 2-DH5 packet has between 2 and 681 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_2DH5))
		chan->imtu = 679;

	/* The 3-DH5 packet has between 2 and 1023 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_3DH5))
		chan->imtu = 1021;
}
3270
/* Build an L2CAP Configure Request for the channel into @data.
 *
 * On the very first configuration exchange the channel mode may be
 * renegotiated against the remote feature mask; afterwards the chosen
 * mode is kept.  MTU, RFC, EFS, EWS and FCS options are appended as
 * applicable for the resulting mode, bounded by @data_size.
 *
 * Returns the number of bytes written (header plus options).
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens before any config exchange took place */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A CONF_STATE2_DEVICE channel keeps its configured mode */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		/* Pick the best mode compatible with the remote features */
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only send an MTU option when deviating from the default;
	 * imtu == 0 means "derive it from the usable packet types".
	 */
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only advertise an explicit BASIC RFC option when the
		 * remote understands RFC options at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Cap the PDU size so a full PDU (headers, SDU length and
		 * FCS included) fits within the connection MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		/* Windows beyond the legacy RFC field travel in a
		 * separate Extended Window Size option.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3396
/* Parse the reassembled Configure Request stored in chan->conf_req and
 * build the Configure Response into @data (bounded by @data_size).
 *
 * Unknown non-hint options are echoed back with result
 * L2CAP_CONF_UNKNOWN.  Returns the response length on success, or
 * -ECONNREFUSED when the request is unacceptable and the channel must
 * be disconnected.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = 0;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: collect the remote's proposed values.  Options
	 * with an unexpected length are skipped.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* QoS option is accepted but ignored */
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Extended window sizes are not accepted here */
			return -ECONNREFUSED;

		default:
			if (hint)
				break;
			/* Echo the unknown option back to the sender */
			result = L2CAP_CONF_UNKNOWN;
			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
			break;
		}
	}

	/* Mode (re)selection only on the first exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* A CONF_STATE2_DEVICE channel cannot change its mode */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Give up after the mode was already rejected once */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		/* If MTU is not provided in configure request, try adjusting it
		 * to the current output MTU if it has been set
		 *
		 * Bluetooth Core 6.1, Vol 3, Part A, Section 4.5
		 *
		 * Each configuration parameter value (if any is present) in an
		 * L2CAP_CONFIGURATION_RSP packet reflects an ‘adjustment’ to a
		 * configuration parameter value that has been sent (or, in case
		 * of default values, implied) in the corresponding
		 * L2CAP_CONFIGURATION_REQ packet.
		 */
		if (!mtu) {
			/* Only adjust for ERTM channels as for older modes the
			 * remote stack may not be able to detect that the
			 * adjustment causing it to silently drop packets.
			 */
			if (chan->mode == L2CAP_MODE_ERTM &&
			    chan->omtu && chan->omtu != L2CAP_DEFAULT_MTU)
				mtu = chan->omtu;
			else
				mtu = L2CAP_DEFAULT_MTU;
		}

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			/* Service types must be compatible unless one side
			 * requested "no traffic".
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote's PDU size to what fits within
			 * our connection MTU.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (remote_efs &&
			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				/* Record the remote's flow spec and echo it */
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0);

	return ptr - data;
}
3639
/* Parse a Configure Response (@rsp, @len) from the remote and rebuild
 * our Configure Request into @data with the adjusted values echoed.
 *
 * @result may be updated (e.g. to L2CAP_CONF_UNACCEPT for an MTU below
 * the minimum).  Returns the new request length, or -ECONNREFUSED when
 * the response conflicts with the channel's required mode or service
 * type.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	/* Walk the remote's adjusted options; each accepted value is
	 * stored on the channel and re-added to the outgoing request.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* A CONF_STATE2_DEVICE channel cannot accept a
			 * different mode than its own.
			 */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* ack_win never grows beyond what we already offered */
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			/* Service types must be compatible unless one side
			 * requested "no traffic".
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* BASIC mode cannot be renegotiated away from */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
			/* Without extended control the RFC window bounds
			 * the ack window.
			 */
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3757
/* Build a bare Configure Response (no options) into @data with the
 * given @result and @flags.  Returns the number of bytes written.
 */
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
				u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *opt = rsp->data;

	BT_DBG("chan %p", chan);

	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(flags);

	/* No options appended, so the length is just the fixed header */
	return opt - data;
}
3772
__l2cap_le_connect_rsp_defer(struct l2cap_chan * chan)3773 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3774 {
3775 struct l2cap_le_conn_rsp rsp;
3776 struct l2cap_conn *conn = chan->conn;
3777
3778 BT_DBG("chan %p", chan);
3779
3780 rsp.dcid = cpu_to_le16(chan->scid);
3781 rsp.mtu = cpu_to_le16(chan->imtu);
3782 rsp.mps = cpu_to_le16(chan->mps);
3783 rsp.credits = cpu_to_le16(chan->rx_credits);
3784 rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3785
3786 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3787 &rsp);
3788 }
3789
/* Channel-list iterator: tally deferred ECRED channels into *data.
 * A positive count means channels are still pending accept; a negative
 * value (-ECONNREFUSED) means at least one channel was refused.
 */
static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
{
	int *result = data;

	/* Stop counting once refused, and skip outgoing channels */
	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	if (chan->state == BT_CONNECT2) {
		/* Channel still pending accept: add to result */
		(*result)++;
	} else if (chan->state != BT_CONNECTED) {
		/* Neither connected nor pending accept: it was refused */
		*result = -ECONNREFUSED;
	}
}
3810
/* Scratch state for building one deferred Enhanced Credit Based
 * connection response PDU that covers multiple channels.
 */
struct l2cap_ecred_rsp_data {
	struct {
		struct l2cap_ecred_conn_rsp_hdr rsp;
		/* CIDs appended after the fixed response header */
		__le16 scid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	int count;	/* number of CID slots filled so far */
};
3818
/* Channel-list iterator: fold one deferred channel into the response
 * PDU being assembled in *data (struct l2cap_ecred_rsp_data).
 */
static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_rsp_data *rsp = data;
	/* View pdu.rsp through the flexible-array response type so the
	 * CID list can be addressed as rsp_flex->dcid[].
	 */
	struct l2cap_ecred_conn_rsp *rsp_flex =
		container_of(&rsp->pdu.rsp, struct l2cap_ecred_conn_rsp, hdr);

	/* Check if channel for outgoing connection or if it wasn't deferred
	 * since in those cases it must be skipped.
	 */
	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags) ||
	    !test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	/* Reset ident so only one response is sent */
	chan->ident = 0;

	/* Include all channels pending with the same ident */
	if (!rsp->pdu.rsp.result)
		rsp_flex->dcid[rsp->count++] = cpu_to_le16(chan->scid);
	else
		l2cap_chan_del(chan, ECONNRESET);
}
3841
/* Send the deferred Enhanced Credit Based connection response once all
 * channels created under the same request ident have been decided.
 *
 * Does nothing while any sibling channel is still pending accept; if
 * any was refused, the whole response reports authorization failure.
 */
void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_rsp_data data;
	u16 id = chan->ident;
	int result = 0;

	/* ident == 0 means no response is owed for this channel */
	if (!id)
		return;

	BT_DBG("chan %p id %d", chan, id);

	memset(&data, 0, sizeof(data));

	data.pdu.rsp.mtu = cpu_to_le16(chan->imtu);
	data.pdu.rsp.mps = cpu_to_le16(chan->mps);
	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);

	/* Verify that all channels are ready */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);

	/* result > 0: some channels still pending accept, respond later */
	if (result > 0)
		return;

	if (result < 0)
		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);

	/* Build response */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);

	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
		       &data.pdu);
}
3877
__l2cap_connect_rsp_defer(struct l2cap_chan * chan)3878 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3879 {
3880 struct l2cap_conn_rsp rsp;
3881 struct l2cap_conn *conn = chan->conn;
3882 u8 buf[128];
3883 u8 rsp_code;
3884
3885 rsp.scid = cpu_to_le16(chan->dcid);
3886 rsp.dcid = cpu_to_le16(chan->scid);
3887 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3888 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3889 rsp_code = L2CAP_CONN_RSP;
3890
3891 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3892
3893 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3894
3895 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3896 return;
3897
3898 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3899 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3900 chan->num_conf_req++;
3901 }
3902
/* Extract the final RFC (and extended window) parameters from a
 * successful Configure Response and apply them to the channel.
 * Only meaningful for ERTM and streaming channels.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	/* Pull RFC and EWS options out of the response, if present */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* With extended control the EWS option carries the window,
		 * otherwise the RFC txwin_size does.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3958
/* Handle an incoming Command Reject.  If it rejects our pending
 * feature-mask Information Request, finish that exchange and start the
 * queued connections anyway.  Returns 0, or -EPROTO on a short packet.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	bool rejects_info_req;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
		return 0;

	rejects_info_req = (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
			   cmd->ident == conn->info_ident;
	if (!rejects_info_req)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);

	return 0;
}
3983
/* Handle an incoming BR/EDR Connect Request.
 *
 * Looks up a listening channel for the requested PSM, checks link
 * security and the remote's source CID, creates the new channel and
 * always sends a Connect Response (@rsp_code) carrying result/status.
 * On a pending result it may also send an Information Request; on
 * success it sends the first Configure Request.
 */
static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
			  u8 *data, u8 rsp_code)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    (!hci_conn_check_link_mode(conn->hcon) ||
	    !l2cap_check_enc_key_size(conn->hcon, pchan))) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			/* Security OK: defer to the owner if requested,
			 * otherwise move straight to configuration.
			 */
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security still in progress */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange not finished yet */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* No listening channel found: nothing further to do */
	if (!pchan)
		return;

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Start the feature-mask Information exchange */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);
}
4114
/* Length-check and dispatch an incoming Connect Request.
 * Returns -EPROTO on a truncated packet, otherwise 0.
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP);

	return 0;
}
4124
/* Handle an incoming Connect Response.
 *
 * Locates the local channel by remote-assigned scid (or by the ident
 * of our request while the CID is still unassigned), takes a reference
 * and, depending on the result, either starts configuration, marks the
 * connection pending, or tears the channel down.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	/* A successful response must assign a dynamic-range CID */
	if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EBADSLT;
	} else {
		/* scid 0: match our outstanding request by ident */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EBADSLT;
	}

	/* Guard against the channel going away while we work on it */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* The assigned dcid must not collide with another channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only the first path to get here sends the config request */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result means the connection was refused */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return err;
}
4202
set_default_fcs(struct l2cap_chan * chan)4203 static inline void set_default_fcs(struct l2cap_chan *chan)
4204 {
4205 /* FCS is enabled only in ERTM or streaming mode, if one or both
4206 * sides request it.
4207 */
4208 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4209 chan->fcs = L2CAP_FCS_NONE;
4210 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4211 chan->fcs = L2CAP_FCS_CRC16;
4212 }
4213
/* Send a successful Configure Response (EFS path): clear the pending
 * local-config flag, mark output configuration done, then transmit the
 * response built into @data.
 */
static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
{
	struct l2cap_conn *conn = chan->conn;
	int len;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
	       flags);

	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	len = l2cap_build_conf_rsp(chan, data, L2CAP_CONF_SUCCESS, flags);
	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP, len, data);
}
4229
/* Send a Command Reject with reason "invalid CID", echoing the
 * offending source/destination CID pair.
 */
static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
				   u16 scid, u16 dcid)
{
	struct l2cap_cmd_rej_cid rej = {
		.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID),
		.scid = __cpu_to_le16(scid),
		.dcid = __cpu_to_le16(dcid),
	};

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
4241
/* Handle an incoming Configure Request.
 *
 * Request fragments (continuation flag set) are accumulated in
 * chan->conf_req; once complete the request is parsed, a response is
 * sent, and — when both directions are configured — the channel is
 * initialised (ERTM/streaming) and made ready.  Returns 0 or a
 * negative errno for malformed input or a failed ERTM init.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only legal in these states */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
		chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	/* Our outgoing options are not settled yet */
	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finish channel setup */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
	}

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4350
/* Handle an incoming L2CAP_CONFIGURATION_RSP on a BR/EDR link.
 *
 * @conn:    signaling connection the response arrived on
 * @cmd:     signaling command header (ident of the request we sent)
 * @cmd_len: total length of @data
 * @data:    response payload (struct l2cap_conf_rsp followed by options)
 *
 * Drives the local side of the configuration state machine: on success
 * or final failure it either marks the inbound direction configured or
 * tears the channel down; UNKNOWN/UNACCEPT results trigger a bounded
 * number of re-negotiation attempts.
 *
 * Returns 0, -EPROTO for a truncated PDU, or a negative error from
 * l2cap_ertm_init().
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);	/* length of the option data */
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Lookup takes a reference and returns with the channel locked;
	 * both are dropped at "done".
	 */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		/* If both sides are pending, answer with our own pending
		 * response now (EFS negotiation).
		 */
		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			/* Remote's options must fit in the scratch buffer
			 * together with a request header.
			 */
			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* Retry configuration with the adjusted options */
			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		/* Retries exhausted or unrecoverable result: give up and
		 * tear the channel down.
		 */
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More option data will follow in another response PDU */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: channel becomes operational */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4457
/* Handle an incoming L2CAP_DISCONNECTION_REQ: acknowledge it, then tear
 * down the matching local channel.
 *
 * Returns 0 (an unknown CID is answered with a command reject rather
 * than an error) or -EPROTO for a malformed PDU.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The remote's DCID is our SCID; lookup locks and references chan */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	/* Echo the CID pair back from our point of view */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	/* Mark the socket shut down before deleting the channel so no
	 * further data is queued on it.
	 */
	chan->ops->set_shutdown(chan);

	l2cap_chan_del(chan, ECONNRESET);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4496
/* Handle an incoming L2CAP_DISCONNECTION_RSP for a disconnect request
 * we previously sent; finish deleting the channel if it is still in
 * BT_DISCONN state.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	struct l2cap_chan *chan;
	u16 dcid, scid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* Lookup locks and references the channel */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	/* Only act if we are actually waiting for this response */
	if (chan->state == BT_DISCONN) {
		l2cap_chan_del(chan, 0);
		chan->ops->close(chan);
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4533
/* Handle an incoming L2CAP_INFORMATION_REQ and answer it directly.
 *
 * Supported queries are the feature mask and the fixed-channel map;
 * anything else is answered with L2CAP_IR_NOTSUPP.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	switch (type) {
	case L2CAP_IT_FEAT_MASK: {
		/* 4-byte rsp header + 32-bit feature mask */
		u8 buf[8];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		u32 feat_mask = l2cap_feat_mask;

		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING |
				     L2CAP_FEAT_FCS;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
		break;
	}
	case L2CAP_IT_FIXED_CHAN: {
		/* 4-byte rsp header + 8-byte fixed channel bitmap */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		rsp->data[0] = conn->local_fixed_chan;
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
		break;
	}
	default: {
		struct l2cap_info_rsp rsp;

		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
		break;
	}
	}

	return 0;
}
4581
/* Handle an incoming L2CAP_INFORMATION_RSP.
 *
 * Only one info request is outstanding at a time (conn->info_ident).
 * A successful feature-mask response may chain into a fixed-channel
 * query; once the exchange is done, pending channel setup is resumed
 * via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	/* A response arrived, so the info-request timeout no longer applies */
	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Remote refused the query: treat the exchange as finished
		 * and let connection setup continue without the info.
		 */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		if (cmd_len >= sizeof(*rsp) + sizeof(u32))
			conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up by asking for the fixed channel map */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		if (cmd_len >= sizeof(*rsp) + sizeof(rsp->data[0]))
			conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4646
/* Handle an incoming LE Connection Parameter Update Request.
 *
 * Only valid when we are the central; the parameters are validated,
 * a response is sent, and on acceptance the controller is asked to
 * apply them and userspace is notified via mgmt.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	/* Peripherals may not be asked to update parameters this way */
	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	rsp.result = err ? cpu_to_le16(L2CAP_CONN_PARAM_REJECTED) :
			   cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint = hci_le_conn_update(hcon, min, max, latency,
						   to_multiplier);

		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);
	}

	return 0;
}
4696
/* Handle an incoming LE Credit Based Connection Response for a request
 * we sent (matched by cmd->ident).
 *
 * On success the channel adopts the remote's DCID/MTU/MPS/credits and
 * becomes ready; security failures may trigger a security upgrade and
 * a later retry; any other result deletes the channel.
 *
 * Returns 0, -EPROTO for a malformed PDU, or -EBADSLT when no channel
 * matches the ident (or the returned DCID is already in use).
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* On success the spec minimums (MTU/MPS >= 23) and a dynamic LE
	 * CID must be respected; anything else is a protocol violation.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Refuse a DCID already claimed by another channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Raise the requested security one level and retry */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

	return err;
}
4776
/* Release the signaling ident we allocated for an outgoing request once
 * the matching response PDU (@code) has arrived with that @id.
 */
static void l2cap_put_ident(struct l2cap_conn *conn, u8 code, u8 id)
{
	switch (code) {
	case L2CAP_COMMAND_REJ:
	case L2CAP_CONN_RSP:
	case L2CAP_CONF_RSP:
	case L2CAP_DISCONN_RSP:
	case L2CAP_ECHO_RSP:
	case L2CAP_INFO_RSP:
	case L2CAP_CONN_PARAM_UPDATE_RSP:
	case L2CAP_ECRED_CONN_RSP:
	case L2CAP_ECRED_RECONF_RSP:
		break;
	default:
		/* Not a response PDU: no ident was allocated for it */
		return;
	}

	/* First do a lookup since the remote may send bogus ids that
	 * would make ida_free to generate warnings.
	 */
	if (ida_find_first_range(&conn->tx_ida, id, id) < 0)
		return;

	ida_free(&conn->tx_ida, id);
}
4796
/* Dispatch one BR/EDR signaling command to its handler.
 *
 * Handler errors are propagated only for request PDUs; for response
 * PDUs the return value is deliberately dropped — presumably so a bad
 * response never triggers an L2CAP command reject from us (matching
 * the comment in l2cap_le_credits()); TODO confirm.
 *
 * Returns 0, a handler's negative error, or -EINVAL for an unknown
 * opcode (the caller turns that into a command reject).
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	/* A response consumes the ident we allocated for the request */
	l2cap_put_ident(conn, cmd->code, cmd->ident);

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
4857
/* Handle an incoming LE Credit Based Connection Request.
 *
 * Validates the PDU and the requested SPSM/SCID, finds a listening
 * channel, checks security, and either creates the new channel (ready
 * or deferred to userspace accept) or reports a failure code in the
 * response.
 *
 * Returns 0 (protocol-level failures are reported in the response) or
 * -EPROTO for a malformed PDU.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm  = req->psm;
	/* dcid/credits stay zero unless a channel is actually created */
	dcid = 0;
	credits = 0;

	/* Spec minimum for LE CoC MTU and MPS is 23 octets */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* The link must already satisfy the listener's security level */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = pchan->sec_level == BT_SECURITY_MEDIUM ?
			 L2CAP_CR_LE_ENCRYPTION : L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if Key Size is sufficient for the security level */
	if (!l2cap_check_enc_key_size(conn->hcon, pchan)) {
		result = L2CAP_CR_LE_BAD_KEY_SIZE;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	/* Advertise our side of the channel in the response */
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

	/* Deferred: the response is sent later by the accept path */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5002
/* Handle an incoming L2CAP_FLOW_CONTROL_CREDIT_IND: add the granted
 * credits to the channel and resume transmission if possible.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt = (struct l2cap_le_credits *) data;
	struct l2cap_chan *chan;
	u16 cid, credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	cid = __le16_to_cpu(pkt->cid);
	credits = __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	/* Lookup locks and references the channel */
	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	/* The total may never exceed LE_FLOWCTL_MAX_CREDITS */
	if (credits > LE_FLOWCTL_MAX_CREDITS - chan->tx_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		chan->tx_credits += credits;

		/* Resume sending */
		l2cap_le_flowctl_send(chan);

		if (chan->tx_credits)
			chan->ops->resume(chan);
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	/* Always 0: even on overflow we must not trigger an unnecessary
	 * command reject packet.
	 */
	return 0;
}
5049
/* Handle an incoming L2CAP_CREDIT_BASED_CONNECTION_REQ (Enhanced Credit
 * Based Flow Control mode; up to L2CAP_ECRED_MAX_CID channels at once).
 *
 * For each requested SCID either a channel is created or its DCID slot
 * in the response stays 0 and a failure code is reported; one response
 * PDU carries the results for all requested channels.
 *
 * Returns 0 after sending (or deferring) the response, or -EINVAL when
 * ECRED support is disabled.
 */
static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_req *req = (void *) data;
	DEFINE_RAW_FLEX(struct l2cap_ecred_conn_rsp, pdu, dcid, L2CAP_ECRED_MAX_CID);
	struct l2cap_chan *chan, *pchan;
	u16 mtu, mps;
	__le16 psm;
	u8 result, rsp_len = 0;
	int i, num_scid = 0;
	bool defer = false;

	if (!enable_ecred)
		return -EINVAL;

	memset(pdu, 0, sizeof(*pdu));

	/* Payload must hold the fixed header plus whole 16-bit SCIDs */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	/* Check if there are no pending channels with the same ident */
	__l2cap_chan_list_id(conn, cmd->ident, l2cap_ecred_list_defer,
			     &num_scid);
	if (num_scid) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	/* Always respond with the same number of scids as in the request */
	rsp_len = cmd_len;

	if (num_scid > L2CAP_ECRED_MAX_CID) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	psm = req->psm;

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = pchan->sec_level == BT_SECURITY_MEDIUM ?
			L2CAP_CR_LE_ENCRYPTION : L2CAP_CR_LE_AUTHENTICATION;
		goto unlock;
	}

	/* Check if the listening channel has set an output MTU then the
	 * requested MTU shall be less than or equal to that value.
	 *
	 * Fix: the comparison was inverted (mtu < pchan->omtu), which
	 * rejected every compliant request and accepted oversized MTUs.
	 */
	if (pchan->omtu && mtu > pchan->omtu) {
		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
		goto unlock;
	}

	result = L2CAP_CR_LE_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid = __le16_to_cpu(req->scid[i]);

		BT_DBG("scid[%d] 0x%4.4x", i, scid);

		/* Stays zero unless the channel is actually created */
		pdu->dcid[i] = 0x0000;

		/* Check for valid dynamic CID range */
		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
			result = L2CAP_CR_LE_INVALID_SCID;
			continue;
		}

		/* Check if we already have channel with that dcid */
		if (__l2cap_get_chan_by_dcid(conn, scid)) {
			result = L2CAP_CR_LE_SCID_IN_USE;
			continue;
		}

		chan = pchan->ops->new_connection(pchan);
		if (!chan) {
			result = L2CAP_CR_LE_NO_MEM;
			continue;
		}

		bacpy(&chan->src, &conn->hcon->src);
		bacpy(&chan->dst, &conn->hcon->dst);
		chan->src_type = bdaddr_src_type(conn->hcon);
		chan->dst_type = bdaddr_dst_type(conn->hcon);
		chan->psm  = psm;
		chan->dcid = scid;
		chan->omtu = mtu;
		chan->remote_mps = mps;

		__l2cap_chan_add(conn, chan);

		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));

		/* Init response: local MTU/MPS/credits are common to all
		 * channels of this request, so fill them in only once.
		 */
		if (!pdu->credits) {
			pdu->mtu = cpu_to_le16(chan->imtu);
			pdu->mps = cpu_to_le16(chan->mps);
			pdu->credits = cpu_to_le16(chan->rx_credits);
		}

		pdu->dcid[i] = cpu_to_le16(chan->scid);

		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

		chan->ident = cmd->ident;
		chan->mode = L2CAP_MODE_EXT_FLOWCTL;

		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
			l2cap_state_change(chan, BT_CONNECT2);
			defer = true;
			chan->ops->defer(chan);
		} else {
			l2cap_chan_ready(chan);
		}
	}

unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

response:
	pdu->result = cpu_to_le16(result);

	/* Deferred setup: the response is sent later by the accept path */
	if (defer)
		return 0;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
		       sizeof(*pdu) + rsp_len, pdu);

	return 0;
}
5219
/* Handle an incoming L2CAP_CREDIT_BASED_CONNECTION_RSP for a request we
 * sent (channels matched by cmd->ident).
 *
 * Walks every pending EXT_FLOWCTL channel of this request: assigns its
 * DCID and parameters on success, retries with higher security when the
 * remote demands it, and deletes refused channels. Per the spec, a DCID
 * collision tears down both the colliding channels.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	/* cmd_len now tracks how many DCID entries remain in the PDU */
	cmd_len -= sizeof(*rsp);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		/* Only channels still pending on this request's ident */
		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Raise the requested security one level, retry */
			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	return err;
}
5329
/* Handle an incoming L2CAP_CREDIT_BASED_RECONFIGURE_REQ.
 *
 * Validates the requested MTU/MPS against every listed channel first
 * and only commits the new values once all of them are acceptable, so a
 * partially-applied reconfiguration can never be observed.
 *
 * Returns 0 after sending the response (failures are reported in the
 * result field), or -EINVAL when ECRED support is disabled.
 */
static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					 u8 *data)
{
	struct l2cap_ecred_reconf_req *req = (void *) data;
	struct l2cap_ecred_reconf_rsp rsp;
	u16 mtu, mps, result;
	struct l2cap_chan *chan[L2CAP_ECRED_MAX_CID] = {};
	int i, num_scid;

	if (!enable_ecred)
		return -EINVAL;

	/* Payload must hold the fixed header plus whole 16-bit SCIDs */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_RECONF_INVALID_CID;
		goto respond;
	}

	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);

	BT_DBG("mtu %u mps %u", mtu, mps);

	if (mtu < L2CAP_ECRED_MIN_MTU) {
		result = L2CAP_RECONF_INVALID_PARAMS;
		goto respond;
	}

	if (mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_RECONF_INVALID_PARAMS;
		goto respond;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	if (num_scid > L2CAP_ECRED_MAX_CID) {
		result = L2CAP_RECONF_INVALID_PARAMS;
		goto respond;
	}

	result = L2CAP_RECONF_SUCCESS;

	/* Check if each SCID, MTU and MPS are valid */
	for (i = 0; i < num_scid; i++) {
		u16 scid;

		scid = __le16_to_cpu(req->scid[i]);
		if (!scid) {
			result = L2CAP_RECONF_INVALID_CID;
			goto respond;
		}

		chan[i] = __l2cap_get_chan_by_dcid(conn, scid);
		if (!chan[i]) {
			result = L2CAP_RECONF_INVALID_CID;
			goto respond;
		}

		/* The MTU field shall be greater than or equal to the greatest
		 * current MTU size of these channels.
		 */
		if (chan[i]->omtu > mtu) {
			BT_ERR("chan %p decreased MTU %u -> %u", chan[i],
			       chan[i]->omtu, mtu);
			result = L2CAP_RECONF_INVALID_MTU;
			goto respond;
		}

		/* If more than one channel is being configured, the MPS field
		 * shall be greater than or equal to the current MPS size of
		 * each of these channels. If only one channel is being
		 * configured, the MPS field may be less than the current MPS
		 * of that channel.
		 *
		 * Fix: the old test (remote_mps >= mps && i) rejected an
		 * unchanged MPS — which the spec explicitly allows — and
		 * never checked the first channel of a multi-channel
		 * request.  A decrease means remote_mps > mps and is only
		 * invalid when more than one channel is configured.
		 */
		if (chan[i]->remote_mps > mps && num_scid > 1) {
			BT_ERR("chan %p decreased MPS %u -> %u", chan[i],
			       chan[i]->remote_mps, mps);
			result = L2CAP_RECONF_INVALID_MPS;
			goto respond;
		}
	}

	/* Commit the new MTU and MPS values after checking they are valid */
	for (i = 0; i < num_scid; i++) {
		chan[i]->omtu = mtu;
		chan[i]->remote_mps = mps;
	}

respond:
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
		       &rsp);

	return 0;
}
5427
/* Handle an incoming L2CAP_CREDIT_BASED_RECONFIGURE_RSP for a request
 * we sent; a rejection tears down every channel still pending on that
 * ident.
 */
static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					 u8 *data)
{
	struct l2cap_ecred_reconf_rsp *rsp = (void *)data;
	struct l2cap_chan *chan, *tmp;
	u16 result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	result = __le16_to_cpu(rsp->result);

	BT_DBG("result 0x%4.4x", result);

	/* Non-zero result: the remote refused the reconfiguration */
	if (result) {
		list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
			if (chan->ident == cmd->ident)
				l2cap_chan_del(chan, ECONNRESET);
		}
	}

	return 0;
}
5455
/* Handle an incoming L2CAP_COMMAND_REJECT_RSP on an LE link: the remote
 * did not understand a request we sent, so delete the channel that is
 * still waiting on that ident.
 */
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);

	/* Take a reference only if the channel is not already going away */
	if (chan)
		chan = l2cap_chan_hold_unless_zero(chan);

	if (chan) {
		l2cap_chan_lock(chan);
		l2cap_chan_del(chan, ECONNREFUSED);
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	return 0;
}
5482
/* Dispatch a single LE signaling command to its handler.
 *
 * Returns 0 on success or a negative error; a non-zero return makes
 * the caller send a Command Reject back to the peer.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	/* Release the ident before dispatching so it can be reused;
	 * presumably this also matches responses against outstanding
	 * requests -- NOTE(review): confirm against l2cap_put_ident().
	 */
	l2cap_put_ident(conn, cmd->code, cmd->ident);

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		/* Return value deliberately ignored: a malformed reject
		 * must not itself trigger another reject.
		 */
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		/* No action required on the response */
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_REQ:
		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_RSP:
		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_REQ:
		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_RSP:
		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5547
/* Process an skb received on the LE signaling channel.
 *
 * Exactly one command per PDU is expected here (unlike the BR/EDR
 * signaling channel, which may batch several).  The skb is always
 * consumed, on both the success and the drop paths.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	/* LE signaling is only valid on an LE link */
	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* The declared length must exactly cover the remaining payload,
	 * and ident 0 is reserved/invalid.
	 */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): message text is misleading -- err comes
		 * from the command handler, not a link-type check.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5588
/* Send a "Command Not Understood" reject for the given request ident. */
static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
{
	struct l2cap_cmd_rej_unk rej = {
		.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD),
	};

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
5596
/* Process an skb received on the BR/EDR signaling channel.
 *
 * A single PDU may contain several back-to-back commands; each is
 * parsed, validated and dispatched in turn.  Malformed commands draw a
 * Command Reject but do not abort processing of the rest of the PDU.
 * The skb is always consumed.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	/* Give raw (SOCK_RAW) listeners a copy of the signaling traffic */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		/* Reject commands whose declared length overruns the PDU
		 * or that carry the reserved ident 0, then skip past
		 * whatever payload is actually present.
		 */
		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			l2cap_sig_send_rej(conn, cmd->ident);
			skb_pull(skb, len > skb->len ? skb->len : len);
			continue;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			BT_ERR("Wrong link type (%d)", err);
			l2cap_sig_send_rej(conn, cmd->ident);
		}

		skb_pull(skb, len);
	}

	/* Trailing bytes too short to form a command header */
	if (skb->len > 0) {
		BT_DBG("corrupted command");
		l2cap_sig_send_rej(conn, 0);
	}

drop:
	kfree_skb(skb);
}
5644
l2cap_check_fcs(struct l2cap_chan * chan,struct sk_buff * skb)5645 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5646 {
5647 u16 our_fcs, rcv_fcs;
5648 int hdr_size;
5649
5650 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5651 hdr_size = L2CAP_EXT_HDR_SIZE;
5652 else
5653 hdr_size = L2CAP_ENH_HDR_SIZE;
5654
5655 if (chan->fcs == L2CAP_FCS_CRC16) {
5656 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5657 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5658 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5659
5660 if (our_fcs != rcv_fcs)
5661 return -EBADMSG;
5662 }
5663 return 0;
5664 }
5665
/* Answer a poll (P=1) from the peer with F=1 set on something:
 * an RNR if we are locally busy, otherwise pending I-frames if any,
 * otherwise a plain RR.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Locally busy: report RNR (carries the F-bit) */
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just cleared its busy condition; restart the
	 * retransmission timer if frames are still outstanding.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes -- these will carry the F-bit and clear
	 * CONN_SEND_FBIT if any go out.
	 */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5699
append_skb_frag(struct sk_buff * skb,struct sk_buff * new_frag,struct sk_buff ** last_frag)5700 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5701 struct sk_buff **last_frag)
5702 {
5703 /* skb->len reflects data in skb as well as all fragments
5704 * skb->data_len reflects only data in fragments
5705 */
5706 if (!skb_has_frag_list(skb))
5707 skb_shinfo(skb)->frag_list = new_frag;
5708
5709 new_frag->next = NULL;
5710
5711 (*last_frag)->next = new_frag;
5712 *last_frag = new_frag;
5713
5714 skb->len += new_frag->len;
5715 skb->data_len += new_frag->len;
5716 skb->truesize += new_frag->truesize;
5717 }
5718
/* Reassemble one received PDU into the channel's in-progress SDU
 * according to its SAR bits, delivering the completed SDU via
 * chan->ops->recv().
 *
 * Ownership: on success the skb has been either delivered or queued
 * into chan->sdu (the local skb pointer is NULLed once ownership
 * transfers).  On error both the skb and any partial SDU are freed.
 * err stays -EINVAL for any SAR sequencing violation.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* An unsegmented PDU in the middle of reassembly is a
		 * protocol error.
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* The start PDU carries a 2-byte total SDU length */
		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* Start PDU must not already contain the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb ownership moved into chan->sdu */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation must leave the SDU still incomplete */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* End PDU must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Discard both the current PDU (if still owned here) and
		 * any partially reassembled SDU.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5803
/* Re-segment queued outgoing data after an MTU change (e.g. channel
 * move).  Stub: not implemented, so there is nothing to redo and no
 * error to report.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	return 0;
}
5809
l2cap_chan_busy(struct l2cap_chan * chan,int busy)5810 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5811 {
5812 u8 event;
5813
5814 if (chan->mode != L2CAP_MODE_ERTM)
5815 return;
5816
5817 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5818 l2cap_tx(chan, NULL, NULL, event);
5819 }
5820
/* Drain the SREJ hold queue: deliver frames to reassembly in sequence
 * order until a gap (missing txseq) is hit or local busy asserts.
 * Once the queue empties, recovery is over and we return to the normal
 * RECV state and acknowledge.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap: the next in-order frame has not arrived yet */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5854
/* Handle a received SREJ S-frame: the peer selectively requests
 * retransmission of the single frame at control->reqseq.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq asks for a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* SREJ with P=1: retransmit with the F-bit set and
		 * remember the reqseq so a later duplicate (with F=1)
		 * is not retransmitted twice.
		 */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only if this F=1 SREJ
			 * answers the P=1 SREJ we already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5912
/* Handle a received REJ S-frame: the peer requests retransmission of
 * all frames starting at control->reqseq.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq rejects a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* Enforce the retry limit (max_tx == 0 means unlimited) */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* F=1 answers our earlier poll; retransmit only if a
		 * REJ wasn't already acted upon (CONN_REJ_ACT).
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5949
/* Classify an incoming I-frame's txseq relative to the receive window
 * and any in-progress SREJ recovery.  The returned L2CAP_TXSEQ_*
 * classification drives the RX state machines' handling of the frame
 * (deliver, queue, ignore, or disconnect).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	/* Extra classifications only exist while SREJ recovery is active */
	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		/* Head of the SREJ list: the retransmission we most want */
		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq behind expected_tx_seq (modulo the sequence space) means
	 * the frame was already received.
	 */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6035
/* ERTM RX state machine handler for the normal RECV state.
 *
 * Processes one event (I-frame or S-frame).  Ownership: skb is freed
 * here unless a branch takes it (delivery to reassembly or the SREJ
 * queue), which is tracked via skb_in_use.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	struct l2cap_ctrl local_control;
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* While locally busy, expected frames are dropped;
			 * they will be recovered via SREJ once busy clears.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu may free skb, hence invalidate
			 * control, so make a copy in advance to use it after
			 * l2cap_reassemble_sdu returns and to avoid the race
			 * condition, for example:
			 *
			 * The current thread calls:
			 * l2cap_reassemble_sdu
			 * chan->ops->recv == l2cap_sock_recv_cb
			 * __sock_queue_rcv_skb
			 * Another thread calls:
			 * bt_sock_recvmsg
			 * skb_recv_datagram
			 * skb_free_datagram
			 * Then the current thread tries to access control, but
			 * it was freed by skb_free_datagram.
			 */
			local_control = *control;
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (local_control.final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					local_control.final = 0;
					l2cap_retransmit_all(chan, &local_control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received: only the ack info is useful */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* F=1 answers our poll; retransmit unless a REJ
			 * already triggered the retransmission.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Plain RR: remote no longer busy, resume sending */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		/* Remote is busy: stop retransmitting until it recovers */
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6187
/* ERTM RX state machine handler for the SREJ_SENT (recovery) state.
 *
 * In-order delivery is suspended: incoming I-frames are parked on the
 * SREJ queue until the requested retransmissions fill the gaps, at
 * which point l2cap_rx_queued_iframes() drains the queue.  Ownership
 * of skb follows skb_in_use as in l2cap_rx_state_recv().
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The retransmission we asked for first: drop it
			 * from the SREJ list, queue it, and try to drain
			 * the hold queue in order.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* F=1 answers our poll; retransmit unless a REJ
			 * already triggered the retransmission.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll by re-requesting the newest gap */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Acknowledge the RNR with a plain RR */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6330
/* Complete an AMP/channel move: return to normal reception, adopt the
 * new baseband link's MTU, and re-segment any queued outgoing data.
 */
static int l2cap_finish_move(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->conn->mtu = chan->conn->hcon->mtu;

	return l2cap_resegment(chan);
}
6340
/* ERTM RX handler for the WAIT_P state (channel move, waiting for the
 * peer's poll).  Anything other than a P=1 frame is a protocol error.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	/* Answer the poll with F=1 */
	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* An I-frame cannot carry P=1, so it is invalid here */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	/* Re-process the S-frame event in the normal RECV state */
	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6378
/* ERTM RX handler for the WAIT_F state (channel move, waiting for the
 * peer's F=1 answer to our poll).  Non-final frames are a protocol
 * error; on F=1 the transmit window is rewound, the new link MTU is
 * adopted, and the frame is re-processed in the RECV state.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;
	chan->conn->mtu = chan->conn->hcon->mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6412
__valid_reqseq(struct l2cap_chan * chan,u16 reqseq)6413 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6414 {
6415 /* Make sure reqseq is for a packet that has been sent but not acked */
6416 u16 unacked;
6417
6418 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6419 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6420 }
6421
/* Top-level ERTM receive entry: validate the frame's reqseq, then
 * route the event to the handler for the channel's current RX state.
 * An out-of-range reqseq is unrecoverable and disconnects the channel.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (__valid_reqseq(chan, control->reqseq)) {
		switch (chan->rx_state) {
		case L2CAP_RX_STATE_RECV:
			err = l2cap_rx_state_recv(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_SREJ_SENT:
			err = l2cap_rx_state_srej_sent(chan, control, skb,
						       event);
			break;
		case L2CAP_RX_STATE_WAIT_P:
			err = l2cap_rx_state_wait_p(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_WAIT_F:
			err = l2cap_rx_state_wait_f(chan, control, skb, event);
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return err;
}
6458
/* Receive path for streaming mode: expected frames are delivered to
 * reassembly; anything out of sequence aborts the partial SDU and is
 * dropped (streaming mode never retransmits).  Always returns 0.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
	 * the txseq field in advance to use it after l2cap_reassemble_sdu
	 * returns and to avoid the race condition, for example:
	 *
	 * The current thread calls:
	 * l2cap_reassemble_sdu
	 * chan->ops->recv == l2cap_sock_recv_cb
	 * __sock_queue_rcv_skb
	 * Another thread calls:
	 * bt_sock_recvmsg
	 * skb_recv_datagram
	 * skb_free_datagram
	 * Then the current thread tries to access control, but it was freed by
	 * skb_free_datagram.
	 */
	u16 txseq = control->txseq;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		/* Errors are ignored: a failed SDU is simply lost */
		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Sequence gap: discard any partial SDU and this frame */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize on this frame's sequence number */
	chan->last_acked_seq = txseq;
	chan->expected_tx_seq = __next_seq(chan, txseq);

	return 0;
}
6510
/* Entry point for data received on an ERTM or streaming channel:
 * validate FCS, length and F/P bit combinations, then feed the frame
 * into the appropriate receive machinery.  Always returns 0; the skb
 * is consumed on every path.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	/* Parse the enhanced/extended control field into bt_cb */
	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Compute the payload length for the MPS check: the SDU-length
	 * header and FCS trailer do not count against the MPS.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	/* Give socket-level filters (e.g. BPF) a chance to drop */
	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the 2-bit supervisory function to an RX event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6603
l2cap_chan_le_send_credits(struct l2cap_chan * chan)6604 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6605 {
6606 struct l2cap_conn *conn = chan->conn;
6607 struct l2cap_le_credits pkt;
6608 u16 return_credits = l2cap_le_rx_credits(chan);
6609
6610 if (chan->rx_credits >= return_credits)
6611 return;
6612
6613 return_credits -= chan->rx_credits;
6614
6615 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6616
6617 chan->rx_credits += return_credits;
6618
6619 pkt.cid = cpu_to_le16(chan->scid);
6620 pkt.credits = cpu_to_le16(return_credits);
6621
6622 chan->ident = l2cap_get_ident(conn);
6623
6624 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6625 }
6626
/* Record how much receive buffer space the channel owner reports and,
 * once the channel is connected, top up the credits advertised to the
 * peer accordingly.
 */
void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail)
{
	if (rx_avail == chan->rx_avail)
		return;

	BT_DBG("chan %p has %zd bytes avail for rx", chan, rx_avail);

	chan->rx_avail = rx_avail;

	if (chan->state != BT_CONNECTED)
		return;

	l2cap_chan_le_send_credits(chan);
}
6639
l2cap_ecred_recv(struct l2cap_chan * chan,struct sk_buff * skb)6640 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
6641 {
6642 int err;
6643
6644 BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
6645
6646 /* Wait recv to confirm reception before updating the credits */
6647 err = chan->ops->recv(chan, skb);
6648
6649 if (err < 0 && chan->rx_avail != -1) {
6650 BT_ERR("Queueing received LE L2CAP data failed");
6651 l2cap_send_disconn_req(chan, ECONNRESET);
6652 return err;
6653 }
6654
6655 /* Update credits whenever an SDU is received */
6656 l2cap_chan_le_send_credits(chan);
6657
6658 return err;
6659 }
6660
l2cap_ecred_data_rcv(struct l2cap_chan * chan,struct sk_buff * skb)6661 static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6662 {
6663 int err;
6664
6665 if (!chan->rx_credits) {
6666 BT_ERR("No credits to receive LE L2CAP data");
6667 l2cap_send_disconn_req(chan, ECONNRESET);
6668 return -ENOBUFS;
6669 }
6670
6671 if (skb->len > chan->imtu) {
6672 BT_ERR("Too big LE L2CAP PDU: len %u > %u", skb->len,
6673 chan->imtu);
6674 l2cap_send_disconn_req(chan, ECONNRESET);
6675 return -ENOBUFS;
6676 }
6677
6678 chan->rx_credits--;
6679 BT_DBG("chan %p: rx_credits %u -> %u",
6680 chan, chan->rx_credits + 1, chan->rx_credits);
6681
6682 /* Update if remote had run out of credits, this should only happens
6683 * if the remote is not using the entire MPS.
6684 */
6685 if (!chan->rx_credits)
6686 l2cap_chan_le_send_credits(chan);
6687
6688 err = 0;
6689
6690 if (!chan->sdu) {
6691 u16 sdu_len;
6692
6693 sdu_len = get_unaligned_le16(skb->data);
6694 skb_pull(skb, L2CAP_SDULEN_SIZE);
6695
6696 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6697 sdu_len, skb->len, chan->imtu);
6698
6699 if (sdu_len > chan->imtu) {
6700 BT_ERR("Too big LE L2CAP SDU length: len %u > %u",
6701 skb->len, sdu_len);
6702 l2cap_send_disconn_req(chan, ECONNRESET);
6703 err = -EMSGSIZE;
6704 goto failed;
6705 }
6706
6707 if (skb->len > sdu_len) {
6708 BT_ERR("Too much LE L2CAP data received");
6709 err = -EINVAL;
6710 goto failed;
6711 }
6712
6713 if (skb->len == sdu_len)
6714 return l2cap_ecred_recv(chan, skb);
6715
6716 chan->sdu = skb;
6717 chan->sdu_len = sdu_len;
6718 chan->sdu_last_frag = skb;
6719
6720 /* Detect if remote is not able to use the selected MPS */
6721 if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
6722 u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
6723
6724 /* Adjust the number of credits */
6725 BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
6726 chan->mps = mps_len;
6727 l2cap_chan_le_send_credits(chan);
6728 }
6729
6730 return 0;
6731 }
6732
6733 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6734 chan->sdu->len, skb->len, chan->sdu_len);
6735
6736 if (chan->sdu->len + skb->len > chan->sdu_len) {
6737 BT_ERR("Too much LE L2CAP data received");
6738 l2cap_send_disconn_req(chan, ECONNRESET);
6739 err = -EINVAL;
6740 goto failed;
6741 }
6742
6743 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6744 skb = NULL;
6745
6746 if (chan->sdu->len == chan->sdu_len) {
6747 err = l2cap_ecred_recv(chan, chan->sdu);
6748 if (!err) {
6749 chan->sdu = NULL;
6750 chan->sdu_last_frag = NULL;
6751 chan->sdu_len = 0;
6752 }
6753 }
6754
6755 failed:
6756 if (err) {
6757 kfree_skb(skb);
6758 kfree_skb(chan->sdu);
6759 chan->sdu = NULL;
6760 chan->sdu_last_frag = NULL;
6761 chan->sdu_len = 0;
6762 }
6763
6764 /* We can't return an error here since we took care of the skb
6765 * freeing internally. An error return would cause the caller to
6766 * do a double-free of the skb.
6767 */
6768 return 0;
6769 }
6770
/* Route an inbound PDU on channel @cid to the owning channel according
 * to its operating mode.  Consumes @skb on every path: either the
 * channel's recv path takes ownership or the skb is freed here.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	/* On success the channel is returned locked and referenced; the
	 * unlock/put at "done" below balances this lookup.
	 */
	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Both credit-based modes share the same receive path */
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
6836
/* Deliver a connectionless (G-frame) packet to the channel bound to
 * @psm, if any.  Consumes @skb on every path.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	/* Connectionless traffic is only handled on BR/EDR links */
	if (hcon->type != ACL_LINK)
		goto free_skb;

	/* Lookup holds a reference on the returned channel; the put
	 * calls below balance it.
	 */
	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	l2cap_chan_lock(chan);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	/* Payloads larger than the channel MTU are silently discarded */
	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6877
l2cap_recv_frame(struct l2cap_conn * conn,struct sk_buff * skb)6878 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6879 {
6880 struct l2cap_hdr *lh = (void *) skb->data;
6881 struct hci_conn *hcon = conn->hcon;
6882 u16 cid, len;
6883 __le16 psm;
6884
6885 if (hcon->state != BT_CONNECTED) {
6886 BT_DBG("queueing pending rx skb");
6887 skb_queue_tail(&conn->pending_rx, skb);
6888 return;
6889 }
6890
6891 skb_pull(skb, L2CAP_HDR_SIZE);
6892 cid = __le16_to_cpu(lh->cid);
6893 len = __le16_to_cpu(lh->len);
6894
6895 if (len != skb->len) {
6896 kfree_skb(skb);
6897 return;
6898 }
6899
6900 /* Since we can't actively block incoming LE connections we must
6901 * at least ensure that we ignore incoming data from them.
6902 */
6903 if (hcon->type == LE_LINK &&
6904 hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
6905 bdaddr_dst_type(hcon))) {
6906 kfree_skb(skb);
6907 return;
6908 }
6909
6910 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6911
6912 switch (cid) {
6913 case L2CAP_CID_SIGNALING:
6914 l2cap_sig_channel(conn, skb);
6915 break;
6916
6917 case L2CAP_CID_CONN_LESS:
6918 psm = get_unaligned((__le16 *) skb->data);
6919 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6920 l2cap_conless_channel(conn, psm, skb);
6921 break;
6922
6923 case L2CAP_CID_LE_SIGNALING:
6924 l2cap_le_sig_channel(conn, skb);
6925 break;
6926
6927 default:
6928 l2cap_data_channel(conn, cid, skb);
6929 break;
6930 }
6931 }
6932
process_pending_rx(struct work_struct * work)6933 static void process_pending_rx(struct work_struct *work)
6934 {
6935 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6936 pending_rx_work);
6937 struct sk_buff *skb;
6938
6939 BT_DBG("");
6940
6941 mutex_lock(&conn->lock);
6942
6943 while ((skb = skb_dequeue(&conn->pending_rx)))
6944 l2cap_recv_frame(conn, skb);
6945
6946 mutex_unlock(&conn->lock);
6947 }
6948
/* Find or create the L2CAP connection object for @hcon.
 * Returns the conn already attached to the hci_conn when present,
 * otherwise allocates a fresh one (creating an HCI channel and taking
 * a reference on the hci_conn).  Returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already set up by a previous caller */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	/* Zero-allocate the connection object */
	conn = kzalloc_obj(*conn);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	/* Hold the hci_conn for as long as this l2cap_conn exists */
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	conn->mtu = hcon->mtu;
	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* Advertise the SMP fixed channel over BR/EDR only when the
	 * controller flags indicate it is usable (Secure Connections or
	 * the debug force flag).
	 */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
	ida_init(&conn->tx_ida);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_DELAYED_WORK(&conn->id_addr_timer, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7000
is_valid_psm(u16 psm,u8 dst_type)7001 static bool is_valid_psm(u16 psm, u8 dst_type)
7002 {
7003 if (!psm)
7004 return false;
7005
7006 if (bdaddr_type_is_le(dst_type))
7007 return (psm <= 0x00ff);
7008
7009 /* PSM must be odd and lsb of upper byte must be 0 */
7010 return ((psm & 0x0101) == 0x0001);
7011 }
7012
/* Cursor passed to l2cap_chan_list() when counting channels that match
 * a reference channel's owner (see l2cap_chan_by_pid()).
 */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* reference channel to compare against */
	struct pid *pid;		/* owner pid being matched */
	int count;			/* number of matching channels found */
};
7018
l2cap_chan_by_pid(struct l2cap_chan * chan,void * data)7019 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7020 {
7021 struct l2cap_chan_data *d = data;
7022 struct pid *pid;
7023
7024 if (chan == d->chan)
7025 return;
7026
7027 if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7028 return;
7029
7030 pid = chan->ops->get_peer_pid(chan);
7031
7032 /* Only count deferred channels with the same PID/PSM */
7033 if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7034 chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
7035 return;
7036
7037 d->count++;
7038 }
7039
/* Initiate an outgoing connection for @chan.
 *
 * @chan:     channel to connect (must be in BT_OPEN or BT_BOUND)
 * @psm:      destination PSM (little-endian); 0 when connecting by CID
 * @cid:      destination fixed-channel CID; 0 when connecting by PSM
 * @dst:      remote device address
 * @dst_type: L2CAP (BDADDR_*) address type of @dst
 * @timeout:  link creation timeout
 *
 * Validates the PSM/CID/mode combination, creates or reuses the
 * underlying ACL/LE link, attaches the channel to its l2cap_conn and
 * kicks off channel setup.  Returns 0 on success (including "already
 * connecting") or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type, u16 timeout)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Refuse modes that are disabled by module parameter */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* When advertising, connect directly; otherwise go
		 * through the scan-based connect path.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type, false,
					      chan->sec_level, timeout,
					      HCI_ROLE_SLAVE, 0, 0);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level, timeout,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN, timeout);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check if there isn't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->lock);
	l2cap_chan_lock(chan);

	/* Destination CID must not already be in use on this link */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* If the link is already up, start channel setup immediately */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7224
l2cap_ecred_reconfigure(struct l2cap_chan * chan)7225 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
7226 {
7227 struct l2cap_conn *conn = chan->conn;
7228 DEFINE_RAW_FLEX(struct l2cap_ecred_reconf_req, pdu, scid, 1);
7229
7230 pdu->mtu = cpu_to_le16(chan->imtu);
7231 pdu->mps = cpu_to_le16(chan->mps);
7232 pdu->scid[0] = cpu_to_le16(chan->scid);
7233
7234 chan->ident = l2cap_get_ident(conn);
7235
7236 l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
7237 sizeof(pdu), &pdu);
7238 }
7239
/* Raise the channel's incoming MTU to @mtu and signal the peer via an
 * ECRED reconfigure request.  Shrinking the MTU is rejected.
 */
int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
{
	/* Only growing the incoming MTU is allowed */
	if (mtu < chan->imtu)
		return -EINVAL;

	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);

	chan->imtu = mtu;

	l2cap_ecred_reconfigure(chan);

	return 0;
}
7253
7254 /* ---- L2CAP interface with lower layer (HCI) ---- */
7255
/* Incoming connection indication from HCI: compute the accept/role
 * link-mode mask from the listening channels.  Channels bound to the
 * exact local address take precedence over wildcard (BDADDR_ANY) ones.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct l2cap_chan *c;
	int exact = 0, lm_exact = 0, lm_any = 0;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&c->src, &hdev->bdaddr)) {
			lm_exact |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm_exact |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&c->src, BDADDR_ANY)) {
			lm_any |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm_any |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm_exact : lm_any;
}
7284
7285 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7286 * from an existing channel in the list or from the beginning of the
7287 * global list (by passing NULL as first parameter).
7288 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	/* Resume after the previous hit, or start from the list head */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Source must match our controller address or be wildcard */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* Returns NULL when the channel is already being freed;
		 * that NULL is passed straight through to the caller.
		 */
		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7320
/* HCI connect-complete callback: create/refresh the L2CAP connection
 * for @hcon and give every matching fixed-channel server the chance to
 * spawn a channel on the new link.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	/* A failed link tears down any existing connection state */
	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Advance before dropping our reference on the current one */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7381
l2cap_disconn_ind(struct hci_conn * hcon)7382 int l2cap_disconn_ind(struct hci_conn *hcon)
7383 {
7384 struct l2cap_conn *conn = hcon->l2cap_data;
7385
7386 BT_DBG("hcon %p", hcon);
7387
7388 if (!conn)
7389 return HCI_ERROR_REMOTE_USER_TERM;
7390 return conn->disc_reason;
7391 }
7392
/* HCI disconnect-complete callback: tear down the L2CAP connection
 * carried by @hcon.  Only ACL and LE links carry L2CAP traffic.
 */
static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
7402
/* React to an encryption change on a connection-oriented channel:
 * medium security tolerates a temporary loss of encryption (guarded by
 * a timer), high/FIPS security closes the channel immediately.
 */
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return;

	switch (chan->sec_level) {
	case BT_SECURITY_MEDIUM:
		if (encrypt == 0x00)
			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
		else
			__clear_chan_timer(chan);
		break;
	case BT_SECURITY_HIGH:
	case BT_SECURITY_FIPS:
		if (encrypt == 0x00)
			l2cap_chan_close(chan, ECONNREFUSED);
		break;
	default:
		break;
	}
}
7419
/* HCI security event callback: the authentication/encryption state of
 * @hcon changed with result @status.  Walk every channel on the
 * connection and resume suspended traffic, continue channel setup, or
 * schedule a disconnect depending on the channel state.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A successful encryption upgrade raises the channel's
		 * effective security level.
		 */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status && l2cap_check_enc_key_size(hcon, chan))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon, chan)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Kick off configuration right after accepting */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
7506
7507 /* Append fragment into frame respecting the maximum len of rx_skb */
l2cap_recv_frag(struct l2cap_conn * conn,struct sk_buff * skb,u16 len)7508 static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
7509 u16 len)
7510 {
7511 if (!conn->rx_skb) {
7512 /* Allocate skb for the complete frame (with header) */
7513 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7514 if (!conn->rx_skb)
7515 return -ENOMEM;
7516 /* Init rx_len */
7517 conn->rx_len = len;
7518
7519 skb_set_delivery_time(conn->rx_skb, skb->tstamp,
7520 skb->tstamp_type);
7521 }
7522
7523 /* Copy as much as the rx_skb can hold */
7524 len = min_t(u16, len, skb->len);
7525 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
7526 skb_pull(skb, len);
7527 conn->rx_len -= len;
7528
7529 return len;
7530 }
7531
/* Complete the 2-byte L2CAP length field of a fragmented frame and
 * make sure conn->rx_skb has room for the full PDU, reallocating it
 * with the exact size when the initial conn->mtu-sized buffer was too
 * small.  Returns a negative errno on allocation failure; otherwise a
 * non-negative byte count (callers only check for < 0).
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to received all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in order to
	 * fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}
7566
/* Discard any partially reassembled ACL frame and reset the
 * reassembly state of the connection.
 */
static void l2cap_recv_reset(struct l2cap_conn *conn)
{
	kfree_skb(conn->rx_skb);
	conn->rx_skb = NULL;
	conn->rx_len = 0;
}
7573
l2cap_conn_hold_unless_zero(struct l2cap_conn * c)7574 struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *c)
7575 {
7576 if (!c)
7577 return NULL;
7578
7579 BT_DBG("conn %p orig refcnt %u", c, kref_read(&c->ref));
7580
7581 if (!kref_get_unless_zero(&c->ref))
7582 return NULL;
7583
7584 return c;
7585 }
7586
/* Entry point for ACL data from the HCI core.  Reassembles HCI
 * fragments (ACL_START/ACL_CONT) into complete L2CAP frames and feeds
 * them to l2cap_recv_frame().  Consumes @skb on every path.
 */
int l2cap_recv_acldata(struct hci_dev *hdev, u16 handle,
		       struct sk_buff *skb, u16 flags)
{
	struct hci_conn *hcon;
	struct l2cap_conn *conn;
	int len;

	/* Lock hdev for hci_conn, and race on l2cap_data vs. l2cap_conn_del */
	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		kfree_skb(skb);
		return -ENOENT;
	}

	hci_conn_enter_active_mode(hcon, BT_POWER_FORCE_ACTIVE_OFF);

	conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	conn = l2cap_conn_hold_unless_zero(conn);

	/* hcon must not be used once the hdev lock is dropped */
	hcon = NULL;

	hci_dev_unlock(hdev);

	if (!conn) {
		kfree_skb(skb);
		return -EINVAL;
	}

	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);

	mutex_lock(&conn->lock);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start frame while reassembly is in progress means
		 * the previous frame was truncated; drop it.
		 */
		if (conn->rx_skb) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment may not contain the L2CAP length so just
		 * copy the initial byte when that happens and use conn->mtu as
		 * expected length.
		 */
		if (skb->len < L2CAP_LEN_SIZE) {
			l2cap_recv_frag(conn, skb, conn->mtu);
			break;
		}

		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			goto unlock;
		}

		BT_DBG("Start: total len %d, frag len %u", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %u, expected len %d)",
			       skb->len, len);
			/* PTS test cases L2CAP/COS/CED/BI-14-C and BI-15-C
			 * (Multiple Signaling Command in one PDU, Data
			 * Truncated, BR/EDR) send a C-frame to the IUT with
			 * PDU Length set to 8 and Channel ID set to the
			 * correct signaling channel for the logical link.
			 * The Information payload contains one L2CAP_ECHO_REQ
			 * packet with Data Length set to 0 with 0 octets of
			 * echo data and one invalid command packet due to
			 * data truncated in PDU but present in HCI packet.
			 *
			 * Shorter the socket buffer to the PDU length to
			 * allow to process valid commands from the PDU before
			 * setting the socket unreliable.
			 */
			skb->len = len;
			l2cap_recv_frame(conn, skb);
			l2cap_conn_unreliable(conn, ECOMM);
			goto unlock;
		}

		/* Append fragment into frame (with header) */
		if (l2cap_recv_frag(conn, skb, len) < 0)
			goto drop;

		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);

		if (!conn->rx_skb) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Complete the L2CAP length if it has not been read */
		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
			if (l2cap_recv_len(conn, skb) < 0) {
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}

			/* Header still could not be read just continue */
			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
				break;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %u, expected %u)",
			       skb->len, conn->rx_len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		l2cap_recv_frag(conn, skb, skb->len);

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
unlock:
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
	return 0;
}
7734
/* HCI callback registration: L2CAP reacts to link-level connect,
 * disconnect and security events.
 */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
7741
l2cap_debugfs_show(struct seq_file * f,void * p)7742 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7743 {
7744 struct l2cap_chan *c;
7745
7746 read_lock(&chan_list_lock);
7747
7748 list_for_each_entry(c, &chan_list, global_l) {
7749 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7750 &c->src, c->src_type, &c->dst, c->dst_type,
7751 c->state, __le16_to_cpu(c->psm),
7752 c->scid, c->dcid, c->imtu, c->omtu,
7753 c->sec_level, c->mode);
7754 }
7755
7756 read_unlock(&chan_list_lock);
7757
7758 return 0;
7759 }
7760
DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

/* debugfs entry created by l2cap_init(), removed by l2cap_exit() */
static struct dentry *l2cap_debugfs;
7764
l2cap_init(void)7765 int __init l2cap_init(void)
7766 {
7767 int err;
7768
7769 err = l2cap_init_sockets();
7770 if (err < 0)
7771 return err;
7772
7773 hci_register_cb(&l2cap_cb);
7774
7775 if (IS_ERR_OR_NULL(bt_debugfs))
7776 return 0;
7777
7778 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7779 NULL, &l2cap_debugfs_fops);
7780
7781 return 0;
7782 }
7783
/* Module teardown: remove the debugfs entry, detach from HCI and tear
 * down the socket layer — the reverse order of l2cap_init().
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
7790
/* Runtime-tunable module parameters (world-readable, root-writable) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");