xref: /linux/net/bluetooth/l2cap_core.c (revision 6832a9317eee280117cd695fa885b2b7a7a38daf)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 
43 #define LE_FLOWCTL_MAX_CREDITS 65535
44 
45 bool disable_ertm;
46 bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED);
47 
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49 
50 static LIST_HEAD(chan_list);
51 static DEFINE_RWLOCK(chan_list_lock);
52 
53 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
54 				       u8 code, u8 ident, u16 dlen, void *data);
55 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 			   void *data);
57 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
58 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59 
60 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
61 		     struct sk_buff_head *skbs, u8 event);
62 static void l2cap_retrans_timeout(struct work_struct *work);
63 static void l2cap_monitor_timeout(struct work_struct *work);
64 static void l2cap_ack_timeout(struct work_struct *work);
65 
/* Map an HCI link type + HCI address type to the exported BDADDR_* type. */
static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
{
	if (link_type != LE_LINK)
		return BDADDR_BREDR;

	return bdaddr_type == ADDR_LE_DEV_PUBLIC ? BDADDR_LE_PUBLIC :
						   BDADDR_LE_RANDOM;
}
77 
/* Exported address type of the connection's own (source) address. */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}
82 
/* Exported address type of the remote (destination) address. */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}
87 
88 /* ---- L2CAP channels ---- */
89 
/* Look up a channel on @conn by its destination CID.
 * Caller must hold conn->lock; no reference is taken.
 */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list)
		if (chan->dcid == cid)
			return chan;

	return NULL;
}
101 
/* Look up a channel on @conn by its source CID.
 * Caller must hold conn->lock; no reference is taken.
 */
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list)
		if (chan->scid == cid)
			return chan;

	return NULL;
}
113 
/* Find channel with given SCID.
 * On success a reference is taken and the channel is returned locked;
 * the caller must l2cap_chan_unlock() and l2cap_chan_put() when done.
 * Returns NULL if no channel matches or its refcount already hit zero.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0; a zero refcount
		 * means the channel is already being destroyed.
		 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}

	return c;
}
132 
/* Find channel with given DCID.
 * On success a reference is taken and the channel is returned locked;
 * the caller must l2cap_chan_unlock() and l2cap_chan_put() when done.
 * Returns NULL if no channel matches or its refcount already hit zero.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0; a zero refcount
		 * means the channel is already being destroyed.
		 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}

	return c;
}
151 
/* Look up a channel on @conn by pending signalling command identifier.
 * Caller must hold conn->lock; no reference is taken.
 */
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list)
		if (chan->ident == ident)
			return chan;

	return NULL;
}
163 
/* Search the global channel list for a channel bound to @psm/@src.
 * Caller must hold chan_list_lock.
 */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
						      u8 src_type)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &chan_list, global_l) {
		/* BR/EDR sockets only match BR/EDR sockets, and LE sockets
		 * only match LE sockets; skip channels of the other kind.
		 */
		if ((src_type == BDADDR_BREDR) !=
		    (chan->src_type == BDADDR_BREDR))
			continue;

		if (chan->sport == psm && !bacmp(&chan->src, src))
			return chan;
	}

	return NULL;
}
181 
/* Bind @chan to PSM @psm on source address @src, or, if @psm is 0,
 * auto-allocate a free dynamic PSM.
 * Returns 0 on success, -EADDRINUSE if the requested PSM is taken,
 * or -EINVAL if no free dynamic PSM could be found.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			/* BR/EDR dynamic PSMs: step by 2 so only valid
			 * (odd) PSM values are probed.
			 */
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
226 
/* Bind @chan to a fixed source CID, turning it into a fixed channel.
 * Always returns 0.
 */
int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
241 
l2cap_alloc_cid(struct l2cap_conn * conn)242 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
243 {
244 	u16 cid, dyn_end;
245 
246 	if (conn->hcon->type == LE_LINK)
247 		dyn_end = L2CAP_CID_LE_DYN_END;
248 	else
249 		dyn_end = L2CAP_CID_DYN_END;
250 
251 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
252 		if (!__l2cap_get_chan_by_scid(conn, cid))
253 			return cid;
254 	}
255 
256 	return 0;
257 }
258 
/* Move @chan to @state and notify the channel owner (err == 0). */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
267 
/* Move @chan to @state and notify the owner with error @err. */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
274 
/* Report error @err to the channel owner without changing the state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
279 
/* (Re)arm the ERTM retransmission timer, but only when no monitor timer
 * is pending and a retransmission timeout has been negotiated.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				secs_to_jiffies(chan->retrans_timeout));
	}
}
288 
/* Arm the ERTM monitor timer; the retransmission timer is stopped first
 * since the two are mutually exclusive.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				secs_to_jiffies(chan->monitor_timeout));
	}
}
297 
/* Find the skb carrying ERTM TxSeq @seq in @head, or NULL if absent. */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->l2cap.txseq == seq)
			return skb;
	}

	return NULL;
}
310 
311 /* ---- L2CAP sequence number lists ---- */
312 
313 /* For ERTM, ordered lists of sequence numbers must be tracked for
314  * SREJ requests that are received and for frames that are to be
315  * retransmitted. These seq_list functions implement a singly-linked
316  * list in an array, where membership in the list can also be checked
317  * in constant time. Items can also be added to the tail of the list
318  * and removed from the head in constant time, without further memory
319  * allocs or frees.
320  */
321 
/* Allocate and clear a sequence list able to hold @size entries.
 * Returns 0 on success or -ENOMEM.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* Power-of-two size lets "seq & mask" replace a modulo. */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
344 
/* Release the backing array of a sequence list. */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
349 
/* Check whether sequence number @seq is currently on the list. */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
356 
/* Remove and return the sequence number at the head of the list.
 * Callers are expected to check for a non-empty list first; popping an
 * empty list returns L2CAP_SEQ_LIST_CLEAR.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next linked entry and clear the old slot. */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	/* Popped the last entry: reset to the empty state. */
	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
372 
l2cap_seq_list_clear(struct l2cap_seq_list * seq_list)373 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
374 {
375 	u16 i;
376 
377 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
378 		return;
379 
380 	for (i = 0; i <= seq_list->mask; i++)
381 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
382 
383 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
384 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
385 }
386 
/* Append sequence number @seq to the tail of the list; duplicates are
 * silently ignored.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already a member: nothing to do. */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
404 
/* Delayed-work handler for the per-channel timer: close the channel with
 * an errno chosen from its current state (connection setup failures are
 * reported as ECONNREFUSED, everything else as ETIMEDOUT).
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	if (!conn)
		return;

	mutex_lock(&conn->lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	/* Drop the reference taken when the timer was scheduled. */
	l2cap_chan_put(chan);

	mutex_unlock(&conn->lock);
}
440 
/* Allocate and initialise a new channel in BT_OPEN state, add it to the
 * global channel list, and return it with an initial reference held.
 * Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	/* GFP_ATOMIC: may be called from non-sleeping contexts. */
	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	/* Available receive buffer space is initially unknown */
	chan->rx_avail = -1;

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
480 
/* kref release callback: unlink the channel from the global list and
 * free it. Runs when the last reference is dropped via l2cap_chan_put().
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
493 
/* Take an additional reference on @c; caller must already hold one. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
500 
/* Take a reference on @c unless its refcount already reached zero
 * (i.e. destruction is in progress). Returns @c on success, NULL
 * otherwise.
 */
struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	if (!kref_get_unless_zero(&c->kref))
		return NULL;

	return c;
}
510 
/* Drop a reference on @c; frees the channel when the count hits zero. */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
518 
/* Reset a channel's negotiable parameters (FCS, tx window, timeouts,
 * security level, ...) to the L2CAP defaults and restart configuration
 * tracking.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

	chan->conf_state = 0;
	/* Cleared again in l2cap_chan_ready() once config completes. */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
539 
/* Compute how many LE flow-control credits to grant the remote side,
 * based on known receive-buffer space (rx_avail) minus any partially
 * reassembled SDU. Returns 0 when no credits should be given.
 */
static __u16 l2cap_le_rx_credits(struct l2cap_chan *chan)
{
	size_t sdu_len = chan->sdu ? chan->sdu->len : 0;

	/* Guard against division by zero below. */
	if (chan->mps == 0)
		return 0;

	/* If we don't know the available space in the receiver buffer, give
	 * enough credits for a full packet.
	 */
	if (chan->rx_avail == -1)
		return (chan->imtu / chan->mps) + 1;

	/* If we know how much space is available in the receive buffer, give
	 * out as many credits as would fill the buffer.
	 */
	if (chan->rx_avail <= sdu_len)
		return 0;

	return DIV_ROUND_UP(chan->rx_avail - sdu_len, chan->mps);
}
561 
/* Initialise LE credit-based flow-control state: reset SDU reassembly,
 * record the peer-granted @tx_credits, derive MPS, and compute the
 * initial rx credits to hand out.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	chan->rx_credits = l2cap_le_rx_credits(chan);

	skb_queue_head_init(&chan->tx_q);
}
574 
/* Initialise enhanced-credit (ECRED) flow control: same as LE flow
 * control, but enforce the spec-mandated minimum MPS.
 */
static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
{
	l2cap_le_flowctl_init(chan, tx_credits);

	/* L2CAP implementations shall support a minimum MPS of 64 octets */
	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
		chan->mps = L2CAP_ECRED_MIN_MPS;
		/* MPS changed, so the credit calculation must be redone. */
		chan->rx_credits = l2cap_le_rx_credits(chan);
	}
}
585 
/* Attach @chan to @conn: assign CIDs/MTU according to the channel type,
 * set QoS defaults, take a channel reference, and append it to the
 * connection's channel list. Caller must hold conn->lock.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default best-effort QoS parameters. */
	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference owned by the connection's channel list; dropped in
	 * l2cap_chan_del().
	 */
	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	/* Append to the list since the order matters for ECRED */
	list_add_tail(&chan->list, &conn->chan_l);
}
638 
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->lock);
}
645 
/* Detach @chan from its connection and tear it down with error @err:
 * stop timers, notify the owner, unlink from the connection list (drop
 * the list's reference and, where applicable, the hci_conn reference),
 * and purge mode-specific queues unless configuration never completed.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference taken in __l2cap_chan_add(). */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);
	}

	/* Mode-specific state was never set up if config didn't complete. */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
703 
/* Invoke @func(@chan, @data) for every channel on @conn whose pending
 * command identifier equals @id. Uses the _safe iterator so @func may
 * remove the channel from the list. Caller must hold conn->lock.
 */
static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
				 l2cap_chan_func_t func, void *data)
{
	struct l2cap_chan *chan, *tmp;

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list)
		if (chan->ident == id)
			func(chan, data);
}
714 
/* Invoke @func(@chan, @data) for every channel on @conn.
 * Caller must hold conn->lock; @func must not remove channels.
 */
static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
			      void *data)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list)
		func(chan, data);
}
724 
/* Locked wrapper around __l2cap_chan_list(); tolerates a NULL @conn. */
void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
		     void *data)
{
	if (!conn)
		return;

	mutex_lock(&conn->lock);
	__l2cap_chan_list(conn, func, data);
	mutex_unlock(&conn->lock);
}

EXPORT_SYMBOL_GPL(l2cap_chan_list);
737 
/* Delayed-work handler: propagate the hci_conn's (possibly resolved)
 * destination address and address type to every channel on the
 * connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_timer.work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
756 
/* Reject a pending LE credit-based connection request: pick a result
 * code (authorization pending vs. bad PSM), move to BT_DISCONN, and
 * send the LE Connection Response.
 */
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_LE_AUTHORIZATION;
	else
		result = L2CAP_CR_LE_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid    = cpu_to_le16(chan->scid);
	rsp.mtu     = cpu_to_le16(chan->imtu);
	rsp.mps     = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}
779 
/* Reject a pending ECRED connection: move to BT_DISCONN and emit the
 * deferred enhanced-credit connection response.
 */
static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
{
	l2cap_state_change(chan, BT_DISCONN);

	__l2cap_ecred_conn_rsp_defer(chan);
}
786 
/* Reject a pending BR/EDR connection request: pick a result code
 * (security block vs. bad PSM), move to BT_DISCONN, and send the
 * Connection Response.
 */
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}
807 
/* Close @chan with error @reason, choosing the shutdown path from its
 * current state: established conn-oriented channels start a disconnect
 * handshake, half-open incoming channels are rejected, everything else
 * is torn down directly. Caller must hold the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Give the peer a bounded time to answer the
			 * disconnect request before the chan timer fires.
			 */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming connection not yet accepted: reject it. */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					return;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
858 
/* Map the channel type, PSM and requested security level to an HCI
 * authentication requirement (HCI_AT_*). May raise sec_level from LOW
 * to SDP for the SDP and 3DSP PSMs as a side effect.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* Non-SDP conn-oriented channels use the general rules. */
		fallthrough;

	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
910 
/* Service level security */

/* Enforce the channel's security level on its link: SMP for LE links,
 * HCI authentication for BR/EDR. Returns the underlying security
 * helper's result.
 */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
925 
/* Allocate the next signalling command identifier for @conn, cycling
 * through 1..128 under conn->ident_lock.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	mutex_lock(&conn->ident_lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	mutex_unlock(&conn->ident_lock);

	return id;
}
947 
/* Send @skb on the connection's ACL channel, or free it if the
 * underlying hci_conn is no longer valid. Consumes @skb either way.
 */
static void l2cap_send_acl(struct l2cap_conn *conn, struct sk_buff *skb,
			   u8 flags)
{
	/* Check if the hcon still valid before attempting to send */
	if (hci_conn_valid(conn->hcon->hdev, conn->hcon))
		hci_send_acl(conn->hchan, skb, flags);
	else
		kfree_skb(skb);
}
957 
/* Build and transmit an L2CAP signalling command at maximum HCI
 * priority. Silently drops the command if the skb cannot be built.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	l2cap_send_acl(conn, skb, flags);
}
982 
/* Transmit a data skb on @chan's ACL link, selecting the flush policy
 * from the link type and the channel's FLAG_FLUSHABLE setting.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
1005 
/* Decode a 16-bit ERTM enhanced control field into @control, filling
 * the S-frame fields (poll/super) or I-frame fields (sar/txseq) and
 * zeroing the others.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
1029 
/* Decode a 32-bit extended control field into the generic control struct.
 * Fields that do not apply to the decoded frame type are zeroed.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	bool sframe = !!(ext & L2CAP_EXT_CTRL_FRAME_TYPE);

	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
	control->sframe = sframe;

	if (sframe) {
		/* S-Frame: supervisory bits only */
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame: segmentation and tx sequence bits only */
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
		control->poll = 0;
		control->super = 0;
	}
}
1053 
__unpack_control(struct l2cap_chan * chan,struct sk_buff * skb)1054 static inline void __unpack_control(struct l2cap_chan *chan,
1055 				    struct sk_buff *skb)
1056 {
1057 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1058 		__unpack_extended_control(get_unaligned_le32(skb->data),
1059 					  &bt_cb(skb)->l2cap);
1060 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1061 	} else {
1062 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
1063 					  &bt_cb(skb)->l2cap);
1064 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1065 	}
1066 }
1067 
__pack_extended_control(struct l2cap_ctrl * control)1068 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1069 {
1070 	u32 packed;
1071 
1072 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1073 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1074 
1075 	if (control->sframe) {
1076 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1077 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1078 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1079 	} else {
1080 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1081 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1082 	}
1083 
1084 	return packed;
1085 }
1086 
__pack_enhanced_control(struct l2cap_ctrl * control)1087 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1088 {
1089 	u16 packed;
1090 
1091 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1092 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1093 
1094 	if (control->sframe) {
1095 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1096 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1097 		packed |= L2CAP_CTRL_FRAME_TYPE;
1098 	} else {
1099 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1100 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1101 	}
1102 
1103 	return packed;
1104 }
1105 
__pack_control(struct l2cap_chan * chan,struct l2cap_ctrl * control,struct sk_buff * skb)1106 static inline void __pack_control(struct l2cap_chan *chan,
1107 				  struct l2cap_ctrl *control,
1108 				  struct sk_buff *skb)
1109 {
1110 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1111 		put_unaligned_le32(__pack_extended_control(control),
1112 				   skb->data + L2CAP_HDR_SIZE);
1113 	} else {
1114 		put_unaligned_le16(__pack_enhanced_control(control),
1115 				   skb->data + L2CAP_HDR_SIZE);
1116 	}
1117 }
1118 
__ertm_hdr_size(struct l2cap_chan * chan)1119 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1120 {
1121 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1122 		return L2CAP_EXT_HDR_SIZE;
1123 	else
1124 		return L2CAP_ENH_HDR_SIZE;
1125 }
1126 
/* Allocate and fill a complete S-frame PDU (header, pre-packed control
 * field and optional FCS) for @chan.
 *
 * Returns the skb on success or ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	/* hlen ends up as the full PDU length: header + control (+ FCS) */
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	/* L2CAP length field excludes the basic header itself */
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything written so far, including header */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1159 
/* Build and transmit one supervisory (S) frame for an ERTM channel,
 * updating the F-bit and RNR bookkeeping bits in conn_state as a side
 * effect. Non-S-frame control structs are silently ignored.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A pending F-bit is carried on the next non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	/* Track whether the last receiver-state we sent was RNR */
	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Every supervisory frame except SREJ acknowledges frames up to
	 * reqseq, so the pending-ack timer can be stopped.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1197 
/* Send an RR (ready) or RNR (not ready) S-frame acknowledging up to the
 * current buffer_seq; RNR is chosen while the channel is locally busy.
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control = {
		.sframe = 1,
		.poll   = poll,
		.reqseq = chan->buffer_seq,
		.super  = test_bit(CONN_LOCAL_BUSY, &chan->conn_state) ?
			  L2CAP_SUPER_RNR : L2CAP_SUPER_RR,
	};

	BT_DBG("chan %p, poll %d", chan, poll);

	l2cap_send_sframe(chan, &control);
}
1216 
__l2cap_no_conn_pending(struct l2cap_chan * chan)1217 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1218 {
1219 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1220 		return true;
1221 
1222 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1223 }
1224 
l2cap_send_conn_req(struct l2cap_chan * chan)1225 void l2cap_send_conn_req(struct l2cap_chan *chan)
1226 {
1227 	struct l2cap_conn *conn = chan->conn;
1228 	struct l2cap_conn_req req;
1229 
1230 	req.scid = cpu_to_le16(chan->scid);
1231 	req.psm  = chan->psm;
1232 
1233 	chan->ident = l2cap_get_ident(conn);
1234 
1235 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1236 
1237 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1238 }
1239 
/* Transition @chan to BT_CONNECTED and notify its owner via ops->ready.
 * Idempotent: does nothing if the channel is already connected.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Credit-based modes cannot transmit without credits;
		 * keep the channel suspended until the peer grants some.
		 */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);
		break;
	}

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1265 
/* Send an LE credit-based connection request for @chan. Safe to call
 * repeatedly: only the first call (per FLAG_LE_CONN_REQ_SENT) sends.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Fall back to the connection MTU if none was configured */
	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	l2cap_le_flowctl_init(chan, 0);

	memset(&req, 0, sizeof(req));
	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1291 
/* Scratch state used while assembling a single enhanced credit-based
 * connection request PDU out of several deferred channels.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req_hdr req;
		__le16 scid[5];	/* room for up to 5 source CIDs per request */
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel that initiated the request */
	struct pid *pid;		/* peer PID; only matching channels are added */
	int count;			/* number of scid[] slots filled so far */
};
1301 
l2cap_ecred_defer_connect(struct l2cap_chan * chan,void * data)1302 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1303 {
1304 	struct l2cap_ecred_conn_data *conn = data;
1305 	struct pid *pid;
1306 
1307 	if (chan == conn->chan)
1308 		return;
1309 
1310 	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1311 		return;
1312 
1313 	pid = chan->ops->get_peer_pid(chan);
1314 
1315 	/* Only add deferred channels with the same PID/PSM */
1316 	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1317 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1318 		return;
1319 
1320 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1321 		return;
1322 
1323 	l2cap_ecred_init(chan, 0);
1324 
1325 	/* Set the same ident so we can match on the rsp */
1326 	chan->ident = conn->chan->ident;
1327 
1328 	/* Include all channels deferred */
1329 	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
1330 
1331 	conn->count++;
1332 }
1333 
/* Send an enhanced credit-based connection request for @chan, bundling
 * other deferred channels with the same peer PID/PSM into the same PDU
 * via l2cap_ecred_defer_connect().
 */
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_conn_data data;

	/* Deferred channels wait to be picked up by another initiator */
	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	/* Only send one request per channel */
	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	memset(&data, 0, sizeof(data));
	data.pdu.req.psm     = chan->psm;
	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.req.mps     = cpu_to_le16(chan->mps);
	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.scid[0]     = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	data.count = 1;
	data.chan = chan;
	data.pid = chan->ops->get_peer_pid(chan);

	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);

	/* PDU length depends on how many CIDs were collected */
	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
		       &data.pdu);
}
1366 
l2cap_le_start(struct l2cap_chan * chan)1367 static void l2cap_le_start(struct l2cap_chan *chan)
1368 {
1369 	struct l2cap_conn *conn = chan->conn;
1370 
1371 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1372 		return;
1373 
1374 	if (!chan->psm) {
1375 		l2cap_chan_ready(chan);
1376 		return;
1377 	}
1378 
1379 	if (chan->state == BT_CONNECT) {
1380 		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1381 			l2cap_ecred_connect(chan);
1382 		else
1383 			l2cap_le_connect(chan);
1384 	}
1385 }
1386 
l2cap_start_connection(struct l2cap_chan * chan)1387 static void l2cap_start_connection(struct l2cap_chan *chan)
1388 {
1389 	if (chan->conn->hcon->type == LE_LINK) {
1390 		l2cap_le_start(chan);
1391 	} else {
1392 		l2cap_send_conn_req(chan);
1393 	}
1394 }
1395 
l2cap_request_info(struct l2cap_conn * conn)1396 static void l2cap_request_info(struct l2cap_conn *conn)
1397 {
1398 	struct l2cap_info_req req;
1399 
1400 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1401 		return;
1402 
1403 	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1404 
1405 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1406 	conn->info_ident = l2cap_get_ident(conn);
1407 
1408 	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1409 
1410 	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1411 		       sizeof(req), &req);
1412 }
1413 
l2cap_check_enc_key_size(struct hci_conn * hcon,struct l2cap_chan * chan)1414 static bool l2cap_check_enc_key_size(struct hci_conn *hcon,
1415 				     struct l2cap_chan *chan)
1416 {
1417 	/* The minimum encryption key size needs to be enforced by the
1418 	 * host stack before establishing any L2CAP connections. The
1419 	 * specification in theory allows a minimum of 1, but to align
1420 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1421 	 *
1422 	 * This check might also be called for unencrypted connections
1423 	 * that have no key size requirements. Ensure that the link is
1424 	 * actually encrypted before enforcing a key size.
1425 	 */
1426 	int min_key_size = hcon->hdev->min_enc_key_size;
1427 
1428 	/* On FIPS security level, key size must be 16 bytes */
1429 	if (chan->sec_level == BT_SECURITY_FIPS)
1430 		min_key_size = 16;
1431 
1432 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1433 		hcon->enc_key_size >= min_key_size);
1434 }
1435 
/* Kick off connection establishment for @chan, gated on transport type,
 * completion of the info req/rsp exchange, channel security and the
 * link's encryption key size.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	/* LE has its own start path (no info request procedure) */
	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* Run the feature-mask exchange first; setup resumes when the
	 * response (or the info timeout) marks it done.
	 */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	/* Wait for security and any connect request already in flight */
	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	if (l2cap_check_enc_key_size(conn->hcon, chan))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
1462 
/* Non-zero when @mode is supported by both the local stack and the
 * remote feature mask; only ERTM and streaming modes are checked.
 */
static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
{
	u32 supported = l2cap_feat_mask;
	u32 want;

	if (!disable_ertm)
		supported |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;

	switch (mode) {
	case L2CAP_MODE_ERTM:
		want = L2CAP_FEAT_ERTM;
		break;
	case L2CAP_MODE_STREAMING:
		want = L2CAP_FEAT_STREAMING;
		break;
	default:
		return 0x00;
	}

	return want & feat_mask & supported;
}
1478 
/* Send a disconnect request for @chan and move it to BT_DISCONN with
 * @err as the channel error. ERTM timers are stopped first so nothing
 * fires while the channel is being torn down.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1500 
1501 /* ---- L2CAP connections ---- */
/* Walk every channel on a BR/EDR connection after the info exchange has
 * completed and advance its state machine: send pending connect
 * requests, answer held incoming connects and start configuration.
 * Called with conn->lock held (see l2cap_info_timeout).
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		/* Channels without a connect exchange are ready as-is */
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Wait for security and any in-flight connect req */
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close the channel when the peer lacks support for
			 * its mode and CONF_STATE2_DEVICE is set (presumably
			 * meaning no fallback is allowed — see the flag's
			 * users elsewhere in this file).
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon, chan))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connect that was held for security */
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only proceed to configuration on success and when
			 * a config request is not already outstanding.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}
}
1577 
/* Per-connection LE setup run once the link is ready: trigger pending
 * outgoing security and, as peripheral, request a connection parameter
 * update when the current interval is out of the configured range.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1610 
/* Called when the underlying HCI connection is fully established: start
 * the info exchange (BR/EDR), advance every existing channel, then run
 * the LE post-connect work and release rx packets queued during setup.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed channels wait for the info exchange */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process packets that arrived before the connection was ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1646 
/* Notify sockets that we cannot guarantee reliability anymore */
l2cap_conn_unreliable(struct l2cap_conn * conn,int err)1648 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1649 {
1650 	struct l2cap_chan *chan;
1651 
1652 	BT_DBG("conn %p", conn);
1653 
1654 	list_for_each_entry(chan, &conn->chan_l, list) {
1655 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1656 			l2cap_chan_set_err(chan, err);
1657 	}
1658 }
1659 
/* Info request timed out: mark the feature exchange as done anyway so
 * waiting channels are not stuck forever, then resume connection setup.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	mutex_lock(&conn->lock);
	l2cap_conn_start(conn);
	mutex_unlock(&conn->lock);
}
1672 
1673 /*
1674  * l2cap_user
1675  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1676  * callback is called during registration. The ->remove callback is called
1677  * during unregistration.
1678  * An l2cap_user object can either be explicitly unregistered or when the
1679  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1680  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1681  * External modules must own a reference to the l2cap_conn object if they intend
1682  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1683  * any time if they don't.
1684  */
1685 
/* Register an external l2cap_user on @conn.
 *
 * @user->list must be initialized and empty; the user's ->probe callback
 * is invoked and must succeed for registration to complete.
 *
 * Returns 0 on success, -EINVAL if @user is already registered, -ENODEV
 * if the connection has already been torn down, or the error returned
 * by ->probe.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1723 
l2cap_unregister_user(struct l2cap_conn * conn,struct l2cap_user * user)1724 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1725 {
1726 	struct hci_dev *hdev = conn->hcon->hdev;
1727 
1728 	hci_dev_lock(hdev);
1729 
1730 	if (list_empty(&user->list))
1731 		goto out_unlock;
1732 
1733 	list_del_init(&user->list);
1734 	user->remove(conn, user);
1735 
1736 out_unlock:
1737 	hci_dev_unlock(hdev);
1738 }
1739 EXPORT_SYMBOL(l2cap_unregister_user);
1740 
/* Detach every registered l2cap_user from @conn, invoking each one's
 * ->remove callback. Popping the first entry on each iteration keeps
 * the walk safe even if ->remove mutates the list.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del_init(&user->list);
		user->remove(conn, user);
	}
}
1751 
/* Tear down an l2cap_conn when its HCI connection goes away: stop
 * pending work, detach all users, close every channel with @err and
 * release the connection's hci_chan and reference.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	mutex_lock(&conn->lock);

	/* Drop any partially reassembled incoming frame */
	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	cancel_delayed_work_sync(&conn->id_addr_timer);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so it survives l2cap_chan_del() */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hci_chan_del(conn->hchan);
	/* NULL hchan marks the conn as dead for l2cap_register_user() */
	conn->hchan = NULL;

	hcon->l2cap_data = NULL;
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
}
1805 
/* kref release callback: drop the hci_conn reference held by the
 * l2cap_conn and free it. Invoked via l2cap_conn_put().
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1813 
/* Take a reference on @conn; returns @conn for call chaining. */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
1820 
/* Drop a reference on @conn; the last put frees it via l2cap_conn_free(). */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1826 
1827 /* ---- Socket interface ---- */
1828 
/* Find a channel bound to @psm whose source/destination addresses and
 * link type match, preferring an exact address pair and falling back to
 * the closest wildcard (BDADDR_ANY) match. A reference is taken on the
 * returned channel; the caller must drop it with l2cap_chan_put().
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		/* state == 0 matches any channel state */
		if (state && c->state != state)
			continue;

		/* The channel's address type must suit the link type */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				/* Skip channels already on their way out */
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1882 
l2cap_monitor_timeout(struct work_struct * work)1883 static void l2cap_monitor_timeout(struct work_struct *work)
1884 {
1885 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1886 					       monitor_timer.work);
1887 
1888 	BT_DBG("chan %p", chan);
1889 
1890 	l2cap_chan_lock(chan);
1891 
1892 	if (!chan->conn) {
1893 		l2cap_chan_unlock(chan);
1894 		l2cap_chan_put(chan);
1895 		return;
1896 	}
1897 
1898 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1899 
1900 	l2cap_chan_unlock(chan);
1901 	l2cap_chan_put(chan);
1902 }
1903 
l2cap_retrans_timeout(struct work_struct * work)1904 static void l2cap_retrans_timeout(struct work_struct *work)
1905 {
1906 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1907 					       retrans_timer.work);
1908 
1909 	BT_DBG("chan %p", chan);
1910 
1911 	l2cap_chan_lock(chan);
1912 
1913 	if (!chan->conn) {
1914 		l2cap_chan_unlock(chan);
1915 		l2cap_chan_put(chan);
1916 		return;
1917 	}
1918 
1919 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1920 	l2cap_chan_unlock(chan);
1921 	l2cap_chan_put(chan);
1922 }
1923 
/* Transmit all queued PDUs in streaming mode: frames get a tx sequence
 * number and optional FCS and are sent immediately, with no
 * retransmission or acknowledgement tracking.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Streaming mode never acknowledges, so reqseq stays 0 */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1959 
/* Transmit as many new I-frames as the remote tx window allows while in
 * the XMIT state. Originals stay queued on tx_q for retransmission;
 * clones are handed to the lower layer.
 *
 * Returns the number of frames sent, 0 when flow-controlled or blocked,
 * or -ENOTCONN when the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* The peer signalled RNR; hold off until it is ready again */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Piggy-back a pending F-bit on this I-frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Each I-frame also acknowledges everything received */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance the head past the frame just transmitted */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2026 
/* Retransmit the I-frames whose sequence numbers are queued on
 * chan->retrans_list.
 *
 * Each frame's control field and FCS are rebuilt before sending,
 * since reqseq and the final bit may have changed since the original
 * transmission.  If a frame exceeds chan->max_tx retries the channel
 * is disconnected and the retransmit list is dropped.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		/* Frame may already have been acked and freed */
		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Acknowledge frames received since the original send */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
						L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2104 
/* Retransmit the single frame requested by an SREJ: queue
 * control->reqseq and run the resend machinery.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2113 
/* Retransmit all unacked frames starting at control->reqseq (REJ
 * handling).
 *
 * Rebuilds the retransmit list from the tx queue: the first walk
 * finds the frame with txseq == reqseq, the second walk queues every
 * frame from there up to (but excluding) tx_send_head, since frames
 * from tx_send_head onward have not been transmitted yet.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2147 
/* Acknowledge received I-frames.
 *
 * If the local side is busy, send an RNR S-frame.  Otherwise try to
 * piggy-back the ack on pending I-frames via l2cap_ertm_send(); if
 * frames remain unacked, send an explicit RR once roughly 3/4 of the
 * ack window is pending, or (re)arm the ack timer to batch the ack.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Defer the ack, batching it with later frames */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2197 
/* Copy @len bytes of user data from @msg into @skb, placing the first
 * @count bytes in skb's linear area and the remainder in newly
 * allocated continuation skbs chained on skb's frag_list (each at
 * most conn->mtu bytes, with no L2CAP header).
 *
 * Returns the number of bytes copied, -EFAULT on a failed copy from
 * the iterator, or the alloc_skb error.  On failure, partially
 * attached fragments are left on @skb for the caller to free via
 * kfree_skb().
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link before copying so the caller can free it on error */
		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
				   &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len  -= count;

		/* Account fragment bytes in the parent skb's totals */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2241 
/* Build a connectionless (G-frame) PDU: L2CAP header + PSM + payload
 * copied from @msg.  Payload beyond the HCI MTU goes into frag_list
 * continuation skbs.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* Linear part is limited by the HCI MTU */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2273 
/* Build a basic-mode (B-frame) PDU: L2CAP header + payload copied
 * from @msg, with overflow in frag_list continuation skbs.  Returns
 * the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	/* Linear part is limited by the HCI MTU */
	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2303 
/* Build an ERTM/streaming I-frame PDU.
 *
 * Layout: L2CAP header, zeroed control field (filled in when the
 * frame is actually sent), optional SDU length (@sdulen != 0 marks a
 * SAR start fragment), payload from @msg, and room reserved for the
 * FCS if the channel uses CRC16.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2357 
/* Segment an SDU from @msg into ERTM/streaming I-frame PDUs queued on
 * @seg_queue.
 *
 * The PDU size is derived from the HCI MTU, capped for BR/EDR,
 * reduced by the worst-case L2CAP overhead, and further constrained
 * by the remote's MPS.  Each PDU is tagged with its SAR role
 * (unsegmented, start, continue, end); only the start fragment
 * carries the total SDU length.
 *
 * Returns 0 on success or a negative errno, in which case @seg_queue
 * has been purged.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the first PDU carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2423 
/* Build an LE flow-control (K-frame) PDU: L2CAP header, optional SDU
 * length (@sdulen != 0 marks the first PDU of an SDU), and payload
 * from @msg.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	/* Linear part is limited by the HCI MTU */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2466 
/* Segment an SDU from @msg into LE flow-control PDUs queued on
 * @seg_queue.  The first PDU carries the SDU length and therefore has
 * L2CAP_SDULEN_SIZE less payload than the following ones.
 *
 * Returns 0 on success or a negative errno, in which case @seg_queue
 * has been purged.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		/* After the first PDU: no SDU length field, so the
		 * remaining PDUs can carry SDULEN_SIZE more payload.
		 */
		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2502 
l2cap_le_flowctl_send(struct l2cap_chan * chan)2503 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2504 {
2505 	int sent = 0;
2506 
2507 	BT_DBG("chan %p", chan);
2508 
2509 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2510 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2511 		chan->tx_credits--;
2512 		sent++;
2513 	}
2514 
2515 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2516 	       skb_queue_len(&chan->tx_q));
2517 }
2518 
/* Attach a TX timestamp cookie to @skb: stream sockets account the
 * full @len, everything else counts a single unit.
 */
static void l2cap_tx_timestamp(struct sk_buff *skb,
			       const struct sockcm_cookie *sockc,
			       size_t len)
{
	struct sock *sk = skb ? skb->sk : NULL;
	bool stream = sk && sk->sk_type == SOCK_STREAM;

	hci_setup_tx_timestamp(skb, stream ? len : 1, sockc);
}
2530 
/* Timestamp a segmented send: stream sockets stamp the last segment
 * of @queue, all other sockets stamp the first.
 */
static void l2cap_tx_timestamp_seg(struct sk_buff_head *queue,
				   const struct sockcm_cookie *sockc,
				   size_t len)
{
	struct sk_buff *head = skb_peek(queue);
	struct sock *sk = head ? head->sk : NULL;
	struct sk_buff *target;

	if (sk && sk->sk_type == SOCK_STREAM)
		target = skb_peek_tail(queue);
	else
		target = head;

	l2cap_tx_timestamp(target, sockc, len);
}
2543 
/* Send an SDU of @len bytes from @msg on @chan, segmenting or
 * fragmenting it as required by the channel's mode.
 *
 * Returns the number of bytes accepted on success, or a negative
 * errno: -ENOTCONN if there is no connection (or it drops while
 * segmenting), -EMSGSIZE if @len exceeds the outgoing MTU, -EBADFD
 * for an unknown channel mode, or an allocation/copy error.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    const struct sockcm_cookie *sockc)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_tx_timestamp(skb, sockc, len);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have dropped while segmenting (which can
		 * block on allocation); discard everything if so.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		l2cap_tx_timestamp_seg(&seg_queue, sockc, len);

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: tell the owner to stop feeding data */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_tx_timestamp(skb, sockc, len);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM) {
			/* TODO: ERTM mode timestamping */
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		} else {
			l2cap_tx_timestamp_seg(&seg_queue, sockc, len);
			l2cap_streaming_send(chan, &seg_queue);
		}

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2657 
/* Send SREJ S-frames for every missing sequence number from
 * expected_tx_seq up to (but excluding) @txseq, skipping frames
 * already held in the srej queue, and remember each requested seq on
 * srej_list.  Afterwards the next in-order frame expected is the one
 * following @txseq.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2680 
l2cap_send_srej_tail(struct l2cap_chan * chan)2681 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2682 {
2683 	struct l2cap_ctrl control;
2684 
2685 	BT_DBG("chan %p", chan);
2686 
2687 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2688 		return;
2689 
2690 	memset(&control, 0, sizeof(control));
2691 	control.sframe = 1;
2692 	control.super = L2CAP_SUPER_SREJ;
2693 	control.reqseq = chan->srej_list.tail;
2694 	l2cap_send_sframe(chan, &control);
2695 }
2696 
/* Re-send SREJ S-frames for all outstanding missing frames except
 * @txseq (which has just arrived).  Each popped seq is re-appended so
 * the list is preserved; the loop stops after one full rotation or
 * when @txseq (or an empty list) is reached.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2722 
/* Process an incoming acknowledgment: free every frame on the tx
 * queue with a txseq from expected_ack_seq up to (but excluding)
 * @reqseq, and stop the retransmission timer once nothing remains
 * unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack was already processed */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2754 
/* Abort the SREJ_SENT receive state: discard the out-of-order frames
 * and pending SREJ requests, rewind expected_tx_seq to buffer_seq,
 * and fall back to the plain RECV state.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2764 
/* ERTM TX state machine: handle @event while in the XMIT state.
 *
 * Data requests are queued and sent immediately; local-busy
 * transitions emit RNR/RR; an explicit poll or retransmission timeout
 * sends a P-bit S-frame and moves the machine to WAIT_F.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We sent RNR earlier; poll the peer with RR to
			 * resynchronize, then wait for the F-bit reply.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2836 
/* ERTM TX state machine: handle @event while in the WAIT_F state
 * (a poll is outstanding and we are waiting for the F-bit reply).
 *
 * Data requests are queued but not sent; receipt of an F-bit response
 * clears the monitor timer and returns the machine to XMIT; monitor
 * timeouts re-poll up to chan->max_tx times before disconnecting.
 *
 * Fix: the "recv fbit" debug format string was malformed
 * ("0x2.2%x" printed a literal "0x2.2" followed by unpadded hex);
 * corrected to the intended "0x%2.2x".
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We sent RNR earlier; poll the peer with RR to
			 * resynchronize and stay in WAIT_F for the reply.
			 */
			struct l2cap_ctrl local_control;
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		fallthrough;

	case L2CAP_EV_RECV_FBIT:
		if (control && control->final) {
			/* Poll answered: back to normal transmission */
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
		}
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_EV_MONITOR_TO:
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
			l2cap_send_disconn_req(chan, ECONNABORTED);
		}
		break;
	default:
		break;
	}
}
2913 
/* Dispatch @event to the handler for the channel's current ERTM TX
 * state; events in unknown states are dropped.
 */
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
	       chan, control, skbs, event, chan->tx_state);

	if (chan->tx_state == L2CAP_TX_STATE_XMIT)
		l2cap_tx_state_xmit(chan, control, skbs, event);
	else if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
		l2cap_tx_state_wait_f(chan, control, skbs, event);
	/* else: ignore event */
}
2932 
/* Feed a received reqseq + F-bit into the TX state machine. */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2939 
/* Feed only a received F-bit into the TX state machine. */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2946 
2947 /* Copy frame to all raw sockets on that connection */
l2cap_raw_recv(struct l2cap_conn * conn,struct sk_buff * skb)2948 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2949 {
2950 	struct sk_buff *nskb;
2951 	struct l2cap_chan *chan;
2952 
2953 	BT_DBG("conn %p", conn);
2954 
2955 	list_for_each_entry(chan, &conn->chan_l, list) {
2956 		if (chan->chan_type != L2CAP_CHAN_RAW)
2957 			continue;
2958 
2959 		/* Don't send frame to the channel it came from */
2960 		if (bt_cb(skb)->l2cap.chan == chan)
2961 			continue;
2962 
2963 		nskb = skb_clone(skb, GFP_KERNEL);
2964 		if (!nskb)
2965 			continue;
2966 		if (chan->ops->recv(chan, nskb))
2967 			kfree_skb(nskb);
2968 	}
2969 }
2970 
2971 /* ---- L2CAP signalling commands ---- */
/* Build an L2CAP signalling command skb.
 *
 * Allocates an skb carrying an L2CAP header (signalling CID chosen by
 * link type), a command header (@code, @ident, @dlen), and @dlen
 * bytes of @data.  Data beyond conn->mtu is spilled into frag_list
 * continuation skbs with no L2CAP header.
 *
 * Returns the skb, or NULL if allocation fails or the MTU cannot even
 * hold the headers.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Whatever fits after the headers goes in the first skb */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the frag_list chain as well */
	kfree_skb(skb);
	return NULL;
}
3037 
/* Parse one configuration option at *@ptr, advancing the pointer past
 * it.  @type and @olen receive the option's type and length; @val
 * receives the value for 1/2/4-byte options, or a pointer to the raw
 * option data for any other length.  Returns the total size consumed.
 *
 * NOTE(review): opt->len comes from peer-controlled data and is not
 * bounds-checked here; callers are expected to ensure the option fits
 * within the received buffer before calling — confirm at call sites.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back the raw pointer */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3071 
/* Append one configuration option to the buffer at *@ptr, advancing
 * the pointer past it.  For 1/2/4-byte options @val is the value
 * itself; for other lengths @val is a pointer to the data to copy.
 * If fewer than @size bytes remain the option is silently dropped.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	/* Not enough room left: drop the option rather than overflow */
	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: @val is a data pointer */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3104 
/* Append an Extended Flow Specification option for @chan to the buffer
 * at *ptr.  Only ERTM and streaming mode carry an EFS; for any other
 * mode nothing is written.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
	struct l2cap_conf_efs efs;

	if (chan->mode == L2CAP_MODE_ERTM) {
		efs.id		= chan->local_id;
		efs.stype	= chan->local_stype;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
	} else if (chan->mode == L2CAP_MODE_STREAMING) {
		efs.id		= 1;
		efs.stype	= L2CAP_SERV_BESTEFFORT;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= 0;
		efs.flush_to	= 0;
	} else {
		/* No EFS defined for other modes */
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs, size);
}
3135 
/* Delayed-work handler for the ERTM acknowledgment timer.
 *
 * If received I-frames remain unacknowledged (buffer_seq has advanced
 * past last_acked_seq), send an RR/RNR S-frame so the peer can release
 * its retransmission buffers.  Finally drop the channel reference that
 * was taken when the timer was armed.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Number of frames received but not yet acked */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3155 
/* Reset per-channel transmit/receive sequencing state and (re)initialise
 * the TX queue.  For channels in ERTM mode, additionally set the initial
 * RX/TX state-machine states and allocate the SREJ and retransmission
 * sequence lists.
 *
 * Returns 0 on success or a negative errno if a sequence list could not
 * be allocated (any partially allocated list is freed again).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	/* Discard any partially reassembled SDU state */
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Everything below is ERTM-only state */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3191 
/* Keep the requested mode if it is ERTM or streaming and the remote
 * advertises support for it; otherwise fall back to basic mode.
 */
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
{
	bool reliable = (mode == L2CAP_MODE_STREAMING ||
			 mode == L2CAP_MODE_ERTM);

	if (reliable && l2cap_mode_supported(mode, remote_feat_mask))
		return mode;

	return L2CAP_MODE_BASIC;
}
3204 
/* True if the remote advertised the Extended Window Size feature */
static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
{
	return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW);
}
3209 
/* True if the remote advertised the Extended Flow Specification feature */
static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
{
	return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW);
}
3214 
/* Fill in the default ERTM retransmission and monitor timeouts in the
 * RFC option (stored little-endian, as transmitted on the wire).
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
}
3221 
/* Choose the transmit window size and control-field format.
 *
 * A window larger than the classic maximum needs the extended control
 * field, which requires the remote to support Extended Window Size;
 * otherwise clamp tx_win to the classic default.  ack_win starts out
 * equal to tx_win and may be reduced later during negotiation.
 */
static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
{
	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
	    __l2cap_ews_supported(chan->conn)) {
		/* use extended control field */
		set_bit(FLAG_EXT_CTRL, &chan->flags);
		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
	} else {
		chan->tx_win = min_t(u16, chan->tx_win,
				     L2CAP_DEFAULT_TX_WINDOW);
		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	}
	chan->ack_win = chan->tx_win;
}
3236 
l2cap_mtu_auto(struct l2cap_chan * chan)3237 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3238 {
3239 	struct hci_conn *conn = chan->conn->hcon;
3240 
3241 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3242 
3243 	/* The 2-DH1 packet has between 2 and 56 information bytes
3244 	 * (including the 2-byte payload header)
3245 	 */
3246 	if (!(conn->pkt_type & HCI_2DH1))
3247 		chan->imtu = 54;
3248 
3249 	/* The 3-DH1 packet has between 2 and 85 information bytes
3250 	 * (including the 2-byte payload header)
3251 	 */
3252 	if (!(conn->pkt_type & HCI_3DH1))
3253 		chan->imtu = 83;
3254 
3255 	/* The 2-DH3 packet has between 2 and 369 information bytes
3256 	 * (including the 2-byte payload header)
3257 	 */
3258 	if (!(conn->pkt_type & HCI_2DH3))
3259 		chan->imtu = 367;
3260 
3261 	/* The 3-DH3 packet has between 2 and 554 information bytes
3262 	 * (including the 2-byte payload header)
3263 	 */
3264 	if (!(conn->pkt_type & HCI_3DH3))
3265 		chan->imtu = 552;
3266 
3267 	/* The 2-DH5 packet has between 2 and 681 information bytes
3268 	 * (including the 2-byte payload header)
3269 	 */
3270 	if (!(conn->pkt_type & HCI_2DH5))
3271 		chan->imtu = 679;
3272 
3273 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3274 	 * (including the 2-byte payload header)
3275 	 */
3276 	if (!(conn->pkt_type & HCI_3DH5))
3277 		chan->imtu = 1021;
3278 }
3279 
/* Build an L2CAP configure request for @chan into @data (at most
 * @data_size bytes).
 *
 * On the first request only, the channel mode is (re)selected against
 * the remote feature mask and EFS is enabled if the remote supports it.
 * Then options are appended as appropriate for the final mode: MTU,
 * RFC, and for ERTM/streaming optionally EFS, EWS and FCS.
 *
 * Returns the total length of the request (header plus options).
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens before any request/response has
	 * been exchanged.
	 */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* "State 2" devices keep the mode fixed */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only send an MTU option when deviating from the default */
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only send an explicit basic-mode RFC if the remote
		 * could otherwise assume ERTM/streaming.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* PDU size limited by the connection MTU less worst-case
		 * ERTM overhead (extended header, SDU length, FCS).
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		/* Full window only fits in the EWS option */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3405 
/* Parse the remote's buffered configure request (chan->conf_req /
 * chan->conf_len) and build our configure response into @data (at most
 * @data_size bytes).
 *
 * Walks the option list, records MTU/flush-to/RFC/FCS/EFS, rejects EWS
 * outright, then resolves the channel mode against the remote's RFC.
 * On success, output options (MTU, possibly EFS, RFC) reflecting the
 * accepted values are appended to the response.
 *
 * Returns the length of the response, or -ECONNREFUSED if the
 * configuration cannot be accepted at all.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = 0;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: decode every option the remote sent.  Options with
	 * an unexpected length are silently ignored.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Extended window size is not accepted here */
			return -ECONNREFUSED;

		default:
			/* Unknown hints are ignored; unknown non-hint
			 * options are echoed back as UNKNOWN.
			 */
			if (hint)
				break;
			result = L2CAP_CONF_UNKNOWN;
			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
			break;
		}
	}

	/* Mode (re)negotiation only on the first exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State-2 devices refuse any mode other than their own */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Give up after one failed attempt at mode agreement */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		/* If MTU is not provided in configure request, try adjusting it
		 * to the current output MTU if it has been set
		 *
		 * Bluetooth Core 6.1, Vol 3, Part A, Section 4.5
		 *
		 * Each configuration parameter value (if any is present) in an
		 * L2CAP_CONFIGURATION_RSP packet reflects an ‘adjustment’ to a
		 * configuration parameter value that has been sent (or, in case
		 * of default values, implied) in the corresponding
		 * L2CAP_CONFIGURATION_REQ packet.
		 */
		if (!mtu) {
			/* Only adjust for ERTM channels as for older modes the
			 * remote stack may not be able to detect that the
			 * adjustment causing it to silently drop packets.
			 */
			if (chan->mode == L2CAP_MODE_ERTM &&
			    chan->omtu && chan->omtu != L2CAP_DEFAULT_MTU)
				mtu = chan->omtu;
			else
				mtu = L2CAP_DEFAULT_MTU;
		}

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			/* Service types must be compatible unless one
			 * side declared no-traffic.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* With EWS the real window came in the EWS option */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp remote MPS to what fits our connection MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (remote_efs &&
			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3648 
/* Parse the remote's configure response (@rsp, @len bytes) and build a
 * follow-up configure request into @data (at most @size bytes).
 *
 * Options the remote adjusted (MTU, flush timeout, RFC, EWS, EFS) are
 * adopted and echoed back in the new request.  *result may be updated
 * (e.g. to UNACCEPT if the proposed MTU is too small).
 *
 * Returns the length of the new request, or -ECONNREFUSED when the
 * response proposes a mode or EFS service type we cannot accept.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* State-2 devices never change their mode */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			/* Service types must be compatible unless one side
			 * declared no-traffic.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* A basic-mode channel cannot be talked into another mode */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			/* Without extended control the RFC window bounds
			 * our ack window.
			 */
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}
3766 
/* Build a configure response with no options into @data.
 *
 * Returns the response length: since no options are appended, ptr still
 * points just past the fixed header, so this is sizeof(*rsp).
 */
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
				u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("chan %p", chan);

	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(flags);

	return ptr - data;
}
3781 
__l2cap_le_connect_rsp_defer(struct l2cap_chan * chan)3782 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3783 {
3784 	struct l2cap_le_conn_rsp rsp;
3785 	struct l2cap_conn *conn = chan->conn;
3786 
3787 	BT_DBG("chan %p", chan);
3788 
3789 	rsp.dcid    = cpu_to_le16(chan->scid);
3790 	rsp.mtu     = cpu_to_le16(chan->imtu);
3791 	rsp.mps     = cpu_to_le16(chan->mps);
3792 	rsp.credits = cpu_to_le16(chan->rx_credits);
3793 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3794 
3795 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3796 		       &rsp);
3797 }
3798 
/* __l2cap_chan_list_id() callback: tally deferred ECRED channels.
 *
 * *result counts channels still pending accept; it becomes
 * -ECONNREFUSED (and stops further counting) once any channel in the
 * group was refused.  Channels we initiated are skipped.
 */
static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
{
	int *result = data;

	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	if (chan->state == BT_CONNECT2) {
		/* Still pending accept: count it */
		(*result)++;
	} else if (chan->state != BT_CONNECTED) {
		/* Neither connected nor pending accept: it was refused */
		*result = -ECONNREFUSED;
	}
}
3819 
/* Scratch buffer for building a deferred ECRED connect response: the
 * fixed response header followed by up to L2CAP_ECRED_MAX_CID CIDs.
 * count tracks how many CID slots have been filled in so far.
 */
struct l2cap_ecred_rsp_data {
	struct {
		struct l2cap_ecred_conn_rsp_hdr rsp;
		__le16 scid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	int count;
};
3827 
/* __l2cap_chan_list_id() callback: add one deferred channel to the
 * ECRED connect response being assembled in @data, or delete the
 * channel when the aggregate result is an error.
 */
static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_rsp_data *rsp = data;
	/* Recover the flexible-array view of the PDU so dcid[] can be
	 * addressed through the proper struct type.
	 */
	struct l2cap_ecred_conn_rsp *rsp_flex =
		container_of(&rsp->pdu.rsp, struct l2cap_ecred_conn_rsp, hdr);

	/* Check if channel for outgoing connection or if it wasn't deferred
	 * since in those cases it must be skipped.
	 */
	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags) ||
	    !test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	/* Reset ident so only one response is sent */
	chan->ident = 0;

	/* Include all channels pending with the same ident */
	if (!rsp->pdu.rsp.result)
		rsp_flex->dcid[rsp->count++] = cpu_to_le16(chan->scid);
	else
		l2cap_chan_del(chan, ECONNRESET);
}
3850 
/* Send the deferred response for an ECRED connect request.
 *
 * All channels created by the same request share @chan's ident.  The
 * response is sent only once every channel in the group has left the
 * pending-accept state; if any was refused, the whole response carries
 * an authorization failure.
 */
void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_rsp_data data;
	u16 id = chan->ident;
	int result = 0;

	/* ident 0 means the response was already sent (or never owed) */
	if (!id)
		return;

	BT_DBG("chan %p id %d", chan, id);

	memset(&data, 0, sizeof(data));

	data.pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.rsp.mps     = cpu_to_le16(chan->mps);
	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);

	/* Verify that all channels are ready */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);

	/* Some channels are still pending accept: respond later */
	if (result > 0)
		return;

	if (result < 0)
		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);

	/* Build response */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);

	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
		       &data.pdu);
}
3886 
__l2cap_connect_rsp_defer(struct l2cap_chan * chan)3887 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3888 {
3889 	struct l2cap_conn_rsp rsp;
3890 	struct l2cap_conn *conn = chan->conn;
3891 	u8 buf[128];
3892 	u8 rsp_code;
3893 
3894 	rsp.scid   = cpu_to_le16(chan->dcid);
3895 	rsp.dcid   = cpu_to_le16(chan->scid);
3896 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3897 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3898 	rsp_code = L2CAP_CONN_RSP;
3899 
3900 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3901 
3902 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3903 
3904 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3905 		return;
3906 
3907 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3908 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3909 	chan->num_conf_req++;
3910 }
3911 
/* Extract the final ERTM/streaming parameters from a configure response
 * (@rsp, @len bytes) into @chan: timeouts, MPS and the acknowledgment
 * window.  Channels in other modes are left untouched.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	/* Pick out just the RFC and EWS options */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* With extended control, EWS carries the window; otherwise
		 * it comes from the RFC option.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
3967 
/* Handle an incoming command-reject.  The only reject we act on is
 * "command not understood" for our outstanding information request:
 * treat it as the end of feature discovery and start pending channels.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
		return 0;

	/* Ignore unless it rejects our in-flight info request */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) ||
	    cmd->ident != conn->info_ident)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);

	return 0;
}
3992 
/* Handle an incoming BR/EDR connect request.
 *
 * Looks up a listening channel for the PSM, validates link security and
 * the requested source CID, creates a new channel and replies with
 * @rsp_code.  The result may be deferred (L2CAP_CR_PEND) pending
 * authorization or authentication.  A response is always sent, even on
 * failure (with dcid 0).
 *
 * Locking: pchan is locked after lookup and unlocked at the end; the
 * early "goto response" paths before the lock only occur when pchan is
 * NULL or still locked, and the !pchan case returns right after the
 * response is sent.
 */
static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
			  u8 *data, u8 rsp_code)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    (!hci_conn_check_link_mode(conn->hcon) ||
	    !l2cap_check_enc_key_size(conn->hcon, pchan))) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Remember the request ident for the deferred-response case */
	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Userspace will authorize later */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature discovery still in flight: defer */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	if (!pchan)
		return;

	/* Kick off feature discovery if we haven't yet */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, start configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);
}
4123 
/* Handle an incoming BR/EDR L2CAP Connection Request.
 *
 * Validates the command length, then delegates channel creation and
 * response generation to l2cap_connect() with the classic (BR/EDR)
 * response code.
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	/* Truncated request: report a protocol error */
	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP);
	return 0;
}
4133 
/* Handle a BR/EDR L2CAP Connection Response (also used for Create
 * Channel Response, which shares the same wire layout).
 *
 * Locates the local channel that is waiting for this response and
 * advances its state machine according to the result field.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	/* A successful response must carry a destination CID from the
	 * dynamically allocated range.
	 */
	if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	/* Find the channel: by our source CID when the peer echoed it,
	 * otherwise by the identifier of the request we sent.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EBADSLT;
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EBADSLT;
	}

	/* Take a reference unless the channel is already being released */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Refuse a remote CID that collides with an existing channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the initial configure request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		/* Peer needs more time (e.g. pending authorization) */
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result code is a refusal */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return err;
}
4211 
set_default_fcs(struct l2cap_chan * chan)4212 static inline void set_default_fcs(struct l2cap_chan *chan)
4213 {
4214 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4215 	 * sides request it.
4216 	 */
4217 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4218 		chan->fcs = L2CAP_FCS_NONE;
4219 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4220 		chan->fcs = L2CAP_FCS_CRC16;
4221 }
4222 
/* Send a successful Configure Response for the EFS exchange.
 *
 * Marks local configuration as finished (clears CONF_LOC_CONF_PEND,
 * sets CONF_OUTPUT_DONE) before transmitting the response built into
 * the caller-supplied buffer.
 */
static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
{
	struct l2cap_conn *conn = chan->conn;
	int rsp_len;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
	       flags);

	/* Local side is done configuring this channel */
	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	rsp_len = l2cap_build_conf_rsp(chan, data, L2CAP_CONF_SUCCESS, flags);
	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP, rsp_len, data);
}
4238 
/* Send an L2CAP Command Reject with reason "invalid CID in request",
 * echoing the offending source and destination CIDs back to the peer.
 */
static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
				   u16 scid, u16 dcid)
{
	struct l2cap_cmd_rej_cid rej;

	/* Use cpu_to_le16() consistently for all fields rather than
	 * mixing in the low-level __cpu_to_le16() variant.
	 */
	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
	rej.scid = cpu_to_le16(scid);
	rej.dcid = cpu_to_le16(dcid);

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
4250 
/* Handle an incoming L2CAP Configure Request for a BR/EDR channel.
 *
 * Option data may be split across several requests using the
 * continuation flag; fragments are accumulated in chan->conf_req until
 * a request without the flag completes the set, which is then parsed
 * and answered.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Lookup returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only valid while setting up (BT_CONFIG,
	 * BT_CONNECT2) or on an established channel (BT_CONNECTED).
	 */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config: parse the accumulated options and build the
	 * response; a negative length means the options were unacceptable.
	 */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
		chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		/* An ERTM init failure aborts the connection */
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own configure request has not gone out yet: send it now */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
	}

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4359 
/* Handle an incoming L2CAP Configure Response for a BR/EDR channel.
 *
 * Depending on the result code the response either completes our side
 * of the configuration, signals that the peer is still deciding
 * (PENDING), or carries unacceptable/unknown options that trigger a
 * renegotiation attempt.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Lookup returns the channel locked and with a reference held;
	 * unknown CIDs are silently ignored.
	 */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		/* If we are also pending locally, answer the EFS exchange */
		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		/* Retry with adjusted options, but give up after
		 * L2CAP_CONF_MAX_CONF_RSP rounds (falls through to the
		 * disconnect path below).
		 */
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		/* Unrecoverable: flag the error and tear the channel down */
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More response fragments are coming; wait for the last one */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4466 
/* Handle an incoming L2CAP Disconnection Request: acknowledge it and
 * tear down the addressed channel.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's destination CID is our source CID; lookup returns
	 * the channel locked and with a reference held.
	 */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	/* Echo the CIDs back swapped to the peer's point of view */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	/* Shut down, remove from the connection, then close the channel */
	chan->ops->set_shutdown(chan);

	l2cap_chan_del(chan, ECONNRESET);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4505 
/* Handle an L2CAP Disconnection Response: complete the teardown of a
 * channel we previously asked the peer to disconnect.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* Lookup returns the channel locked and with a reference held;
	 * unknown CIDs are silently ignored.
	 */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	/* Ignore stray responses for channels we did not try to close */
	if (chan->state != BT_DISCONN) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return 0;
	}

	l2cap_chan_del(chan, 0);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4542 
/* Handle an incoming L2CAP Information Request.
 *
 * Answers feature-mask and fixed-channel queries with the local
 * capabilities; any other type is answered with "not supported".
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	switch (type) {
	case L2CAP_IT_FEAT_MASK: {
		/* 4-byte rsp header + 32-bit feature mask */
		u8 buf[8];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		u32 feat_mask = l2cap_feat_mask;

		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING |
				     L2CAP_FEAT_FCS;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
		break;
	}
	case L2CAP_IT_FIXED_CHAN: {
		/* 4-byte rsp header + 8-byte fixed channel map */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		rsp->data[0] = conn->local_fixed_chan;
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
		break;
	}
	default: {
		struct l2cap_info_rsp rsp;

		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
		break;
	}
	}

	return 0;
}
4590 
/* Handle an L2CAP Information Response during connection setup.
 *
 * The feature mask is queried first; if the peer advertises fixed
 * channel support, a follow-up request for the fixed channel map is
 * sent.  Once the exchange completes (or fails), pending channels are
 * started via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Treat a failed query as "exchange finished" and move on */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up with a fixed channel map query */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4653 
/* Handle an LE Connection Parameter Update Request.
 *
 * Only valid when the local device has the central (master) role.  The
 * requested parameters are validated, a response is sent, and accepted
 * parameters are applied to the link and reported to the management
 * interface.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	/* The peripheral requests; only the central may grant */
	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		/* Apply the parameters to the link and let the management
		 * layer decide whether to store them for later connections.
		 */
		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);

	}

	return 0;
}
4703 
/* Handle an LE Credit Based Connection Response.
 *
 * On success the channel becomes ready with the peer's MTU/MPS and
 * initial credits; on a security-related refusal the security level is
 * raised and pairing is initiated so the request can be retried.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	/* A successful response needs sane MTU/MPS (>= 23) and a
	 * destination CID from the LE dynamic range.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	/* Match the response to the request we sent via its identifier */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Refuse a remote CID that is already in use locally */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Raise security one level and retry */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		/* Any other result code is a refusal */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

	return err;
}
4783 
/* Dispatch a single BR/EDR signaling command to its handler.
 *
 * Only request handlers (and unknown opcodes) propagate their status
 * to the caller; failures from response handlers are ignored.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	/* Requests: the handler's status is reported back */
	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	/* Echo requests are answered directly with the same payload */
	case L2CAP_ECHO_REQ:
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	/* Responses and rejects: handled, status not propagated */
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		/* Nothing to do */
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
4842 
/* Handle an incoming LE Credit Based Connection Request.
 *
 * Validates the request, checks for a listening socket and sufficient
 * security, creates the new channel and either defers the decision to
 * userspace (DEFER_SETUP) or responds immediately.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* Spec minimum for LE credit based channels */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* Require the link security demanded by the listening socket */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = pchan->sec_level == BT_SECURITY_MEDIUM ?
			L2CAP_CR_LE_ENCRYPTION : L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	/* Values reported back to the peer in the response */
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

	/* Deferred: the response is sent later, once userspace decides */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
4980 
/* Handle an LE Flow Control Credit packet: add the peer's credits to
 * the channel and resume transmission if sending was blocked.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid	= __le16_to_cpu(pkt->cid);
	credits	= __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	/* Lookup returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	/* The total must never exceed LE_FLOWCTL_MAX_CREDITS; a peer
	 * pushing past that is misbehaving, so disconnect.
	 */
	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		goto unlock;
	}

	chan->tx_credits += credits;

	/* Resume sending */
	l2cap_le_flowctl_send(chan);

	if (chan->tx_credits)
		chan->ops->resume(chan);

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5027 
/* Handle an Enhanced Credit Based Connection Request, which may ask
 * for up to L2CAP_ECRED_MAX_CID channels at once.
 *
 * Each requested source CID is processed independently; per-channel
 * failures are reported through a zero DCID in the response while the
 * result field carries the last error encountered.
 */
static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_req *req = (void *) data;
	DEFINE_RAW_FLEX(struct l2cap_ecred_conn_rsp, pdu, dcid, L2CAP_ECRED_MAX_CID);
	struct l2cap_chan *chan, *pchan;
	u16 mtu, mps;
	__le16 psm;
	u8 result, len = 0;
	int i, num_scid;
	bool defer = false;

	if (!enable_ecred)
		return -EINVAL;

	/* The payload after the fixed header must be a whole number of
	 * 16-bit source CIDs.
	 */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	if (num_scid > L2CAP_ECRED_MAX_CID) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
		goto response;
	}

	psm  = req->psm;

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);

	memset(pdu, 0, sizeof(*pdu));

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* Require the link security demanded by the listening socket */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		goto unlock;
	}

	result = L2CAP_CR_LE_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid = __le16_to_cpu(req->scid[i]);

		BT_DBG("scid[%d] 0x%4.4x", i, scid);

		/* Default to 0 (failure) until the channel is created */
		pdu->dcid[i] = 0x0000;
		len += sizeof(*pdu->dcid);

		/* Check for valid dynamic CID range */
		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
			result = L2CAP_CR_LE_INVALID_SCID;
			continue;
		}

		/* Check if we already have channel with that dcid */
		if (__l2cap_get_chan_by_dcid(conn, scid)) {
			result = L2CAP_CR_LE_SCID_IN_USE;
			continue;
		}

		chan = pchan->ops->new_connection(pchan);
		if (!chan) {
			result = L2CAP_CR_LE_NO_MEM;
			continue;
		}

		bacpy(&chan->src, &conn->hcon->src);
		bacpy(&chan->dst, &conn->hcon->dst);
		chan->src_type = bdaddr_src_type(conn->hcon);
		chan->dst_type = bdaddr_dst_type(conn->hcon);
		chan->psm  = psm;
		chan->dcid = scid;
		chan->omtu = mtu;
		chan->remote_mps = mps;

		__l2cap_chan_add(conn, chan);

		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));

		/* Init response: mtu/mps/credits are shared by all
		 * channels in the response, set once from the first one.
		 */
		if (!pdu->credits) {
			pdu->mtu = cpu_to_le16(chan->imtu);
			pdu->mps = cpu_to_le16(chan->mps);
			pdu->credits = cpu_to_le16(chan->rx_credits);
		}

		pdu->dcid[i] = cpu_to_le16(chan->scid);

		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

		chan->ident = cmd->ident;
		chan->mode = L2CAP_MODE_EXT_FLOWCTL;

		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
			l2cap_state_change(chan, BT_CONNECT2);
			defer = true;
			chan->ops->defer(chan);
		} else {
			l2cap_chan_ready(chan);
		}
	}

unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

response:
	pdu->result = cpu_to_le16(result);

	/* Deferred to userspace: the response will be sent later */
	if (defer)
		return 0;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
		       sizeof(*pdu) + len, pdu);

	return 0;
}
5178 
/* Handle L2CAP_CREDIT_BASED_CONNECTION_RSP: walk every local channel
 * still pending on this command ident and either complete it with the
 * DCID the peer assigned or tear it down according to the result code.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;	/* index into rsp->dcid[], one entry per pending chan */

	/* The fixed part of the response must be present */
	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	/* Remaining bytes are the variable-length dcid[] list */
	cmd_len -= sizeof(*rsp);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		/* Only channels created by the matching request, still in
		 * ext-flowctl mode and not yet connected, belong to this
		 * response.
		 */
		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Retry at one security level above the link's */
			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Accepted: adopt the peer's parameters and go live */
			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	return err;
}
5288 
/* Handle L2CAP_CREDIT_BASED_RECONFIGURE_REQ: the peer updates the MTU
 * and MPS for one or more of its source CIDs.  Per spec an MTU decrease
 * on any listed channel is an error.
 */
static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					 u8 *data)
{
	struct l2cap_ecred_reconf_req *req = (void *) data;
	struct l2cap_ecred_reconf_rsp rsp;
	u16 mtu, mps, result;
	struct l2cap_chan *chan;
	int i, num_scid;

	if (!enable_ecred)
		return -EINVAL;

	/* The payload must hold the fixed header plus a whole number of
	 * 16-bit SCIDs.  The modulo term must be parenthesized: without
	 * the parentheses "%" binds to sizeof(*req) alone (yielding 0),
	 * the check degenerates to "cmd_len != 0", and every well-formed
	 * request is rejected.
	 */
	if (cmd_len < sizeof(*req) ||
	    (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto respond;
	}

	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);

	BT_DBG("mtu %u mps %u", mtu, mps);

	if (mtu < L2CAP_ECRED_MIN_MTU) {
		result = L2CAP_RECONF_INVALID_MTU;
		goto respond;
	}

	if (mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_RECONF_INVALID_MPS;
		goto respond;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);
	result = L2CAP_RECONF_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid;

		scid = __le16_to_cpu(req->scid[i]);
		if (!scid)
			return -EPROTO;	/* caller sends a command reject */

		/* The peer's source CID is our destination CID */
		chan = __l2cap_get_chan_by_dcid(conn, scid);
		if (!chan)
			continue;

		/* If the MTU value is decreased for any of the included
		 * channels, then the receiver shall disconnect all
		 * included channels.
		 */
		if (chan->omtu > mtu) {
			BT_ERR("chan %p decreased MTU %u -> %u", chan,
			       chan->omtu, mtu);
			result = L2CAP_RECONF_INVALID_MTU;
		}

		chan->omtu = mtu;
		chan->remote_mps = mps;
	}

respond:
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
		       &rsp);

	return 0;
}
5359 
/* Handle L2CAP_CREDIT_BASED_RECONFIGURE_RSP: on failure, tear down every
 * local channel still pending on the responded command ident.
 */
static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					 u8 *data)
{
	struct l2cap_chan *chan, *tmp;
	/* NOTE(review): reuses l2cap_ecred_conn_rsp here so the length
	 * check is against that struct's size, not the 2-byte reconfigure
	 * response — confirm against remotes sending the minimal form.
	 */
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	u16 result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	result = __le16_to_cpu(rsp->result);

	/* Log the CPU-endian value: printing the raw __le16 field gives a
	 * byte-swapped number on big-endian hosts.
	 */
	BT_DBG("result 0x%4.4x", result);

	/* Success: nothing to tear down */
	if (!result)
		return 0;

	/* Failure: drop every channel still waiting on this ident */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		if (chan->ident != cmd->ident)
			continue;

		l2cap_chan_del(chan, ECONNRESET);
	}

	return 0;
}
5387 
/* Handle an LE Command Reject: a reject naming an outstanding ident
 * tears down the channel still waiting on that request.
 */
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	/* Payload must at least carry the reason field */
	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		return 0;

	/* Skip channels already on their way down */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan)
		return 0;

	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, ECONNREFUSED);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5414 
/* Dispatch a single LE signaling command to its handler.  Handlers whose
 * status the caller historically ignored report 0 from here.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		return 0;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		return l2cap_conn_param_update_req(conn, cmd, cmd_len, data);

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		/* Nothing to do on a parameter update response */
		return 0;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		return 0;

	case L2CAP_LE_CONN_REQ:
		return l2cap_le_connect_req(conn, cmd, cmd_len, data);

	case L2CAP_LE_CREDITS:
		return l2cap_le_credits(conn, cmd, cmd_len, data);

	case L2CAP_ECRED_CONN_REQ:
		return l2cap_ecred_conn_req(conn, cmd, cmd_len, data);

	case L2CAP_ECRED_CONN_RSP:
		return l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);

	case L2CAP_ECRED_RECONF_REQ:
		return l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);

	case L2CAP_ECRED_RECONF_RSP:
		return l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);

	case L2CAP_DISCONN_REQ:
		return l2cap_disconnect_req(conn, cmd, cmd_len, data);

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		return 0;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		return -EINVAL;
	}
}
5477 
/* Receive path for the LE signaling channel: validate the single command
 * carried by the skb and dispatch it; malformed packets are dropped and
 * failed commands answered with a Command Reject.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	/* LE signaling only makes sense on an LE link */
	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* Exactly one command per LE signaling PDU; ident 0 is reserved */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	/* skb is consumed on every path */
	kfree_skb(skb);
}
5518 
/* Send a "command not understood" reject for the given request ident. */
static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
{
	struct l2cap_cmd_rej_unk rej = {
		.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD),
	};

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
5526 
/* Receive path for the BR/EDR signaling channel: one skb may carry
 * several concatenated commands.  Parse and dispatch each in turn,
 * rejecting malformed entries without abandoning the rest of the packet.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	/* Mirror the raw signaling traffic to any raw sockets first */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		/* Length overruns the skb, or reserved ident 0: reject this
		 * command but keep scanning whatever bytes remain.
		 */
		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			l2cap_sig_send_rej(conn, cmd->ident);
			skb_pull(skb, len > skb->len ? skb->len : len);
			continue;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			BT_ERR("Wrong link type (%d)", err);
			l2cap_sig_send_rej(conn, cmd->ident);
		}

		skb_pull(skb, len);
	}

	/* Trailing bytes too short for a header: reject with ident 0 */
	if (skb->len > 0) {
		BT_DBG("corrupted command");
		l2cap_sig_send_rej(conn, 0);
	}

drop:
	kfree_skb(skb);
}
5574 
l2cap_check_fcs(struct l2cap_chan * chan,struct sk_buff * skb)5575 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
5576 {
5577 	u16 our_fcs, rcv_fcs;
5578 	int hdr_size;
5579 
5580 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5581 		hdr_size = L2CAP_EXT_HDR_SIZE;
5582 	else
5583 		hdr_size = L2CAP_ENH_HDR_SIZE;
5584 
5585 	if (chan->fcs == L2CAP_FCS_CRC16) {
5586 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5587 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5588 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5589 
5590 		if (our_fcs != rcv_fcs)
5591 			return -EBADMSG;
5592 	}
5593 	return 0;
5594 }
5595 
/* Answer a poll (or signal a local-busy transition): acknowledge the
 * peer with the F-bit set — via RNR if still busy, pending I-frames if
 * possible, or a plain RR as a last resort — and resume transmission.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Still busy locally: tell the peer with an RNR carrying F=1 */
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Peer just left its busy state: restart the retransmission timer
	 * if frames remain unacknowledged.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5629 
append_skb_frag(struct sk_buff * skb,struct sk_buff * new_frag,struct sk_buff ** last_frag)5630 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5631 			    struct sk_buff **last_frag)
5632 {
5633 	/* skb->len reflects data in skb as well as all fragments
5634 	 * skb->data_len reflects only data in fragments
5635 	 */
5636 	if (!skb_has_frag_list(skb))
5637 		skb_shinfo(skb)->frag_list = new_frag;
5638 
5639 	new_frag->next = NULL;
5640 
5641 	(*last_frag)->next = new_frag;
5642 	*last_frag = new_frag;
5643 
5644 	skb->len += new_frag->len;
5645 	skb->data_len += new_frag->len;
5646 	skb->truesize += new_frag->truesize;
5647 }
5648 
/* Reassemble SDUs from SAR-tagged fragments and hand complete SDUs to
 * the channel's recv callback.  On success the skb is consumed
 * (delivered or stored in chan->sdu); on error both the skb and any
 * partial SDU are freed.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;	/* any early break reports a protocol error */

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A whole SDU while one is mid-reassembly is invalid */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* First fragment carries the total SDU length up front */
		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must not already hold the full SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb is now owned by chan->sdu; don't free it below */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation may not complete (or overflow) the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Final size must match the length announced at SAR_START */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop both the offending fragment and any partial SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5733 
/* Resegmentation after a channel move is not implemented; returning 0
 * lets the move complete without touching pending data.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5739 
l2cap_chan_busy(struct l2cap_chan * chan,int busy)5740 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5741 {
5742 	u8 event;
5743 
5744 	if (chan->mode != L2CAP_MODE_ERTM)
5745 		return;
5746 
5747 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5748 	l2cap_tx(chan, NULL, NULL, event);
5749 }
5750 
/* Deliver in-sequence frames queued while SREJs were outstanding.  Stops
 * at the first gap, on local-busy, or on a reassembly error; once the
 * SREJ queue drains, the channel returns to the normal RECV state.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		/* reassemble consumes the skb on success and on error */
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		/* All gaps filled: back to normal reception */
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5784 
/* Process a received SREJ S-frame: retransmit the single requested
 * I-frame, honouring the P/F bits and the CONN_SREJ_ACT bookkeeping that
 * prevents retransmitting twice for the same poll exchange.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq must refer to a frame we actually sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* The retransmission must answer the poll with the F-bit */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit when this F-bit answers the
			 * SREJ we already acted on for this reqseq.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5842 
/* Process a received REJ S-frame: retransmit everything from reqseq
 * onward, with CONN_REJ_ACT preventing a duplicate retransmission when
 * the matching F-bit arrives later.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq must refer to a frame we actually sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit when this F-bit doesn't answer a REJ we
		 * have already acted on.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5879 
/* Classify an incoming I-frame's txseq relative to the expected
 * sequence, the last acknowledged sequence, and any outstanding SREJs,
 * so the RX state handlers can decide whether to accept, queue, ignore,
 * or disconnect.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		/* Already queued in srej_q: a retransmitted duplicate */
		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		/* Requested by SREJ but arriving out of the expected order */
		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
5965 
/* RX state machine handler for the normal RECV state: deliver in-order
 * I-frames, start SREJ recovery on a sequence gap, and react to
 * RR/RNR/REJ/SREJ S-frames.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	struct l2cap_ctrl local_control;
	int err = 0;
	bool skb_in_use = false;	/* set once skb ownership moves on */

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu may free skb, hence invalidate
			 * control, so make a copy in advance to use it after
			 * l2cap_reassemble_sdu returns and to avoid the race
			 * condition, for example:
			 *
			 * The current thread calls:
			 *   l2cap_reassemble_sdu
			 *     chan->ops->recv == l2cap_sock_recv_cb
			 *       __sock_queue_rcv_skb
			 * Another thread calls:
			 *   bt_sock_recvmsg
			 *     skb_recv_datagram
			 *     skb_free_datagram
			 * Then the current thread tries to access control, but
			 * it was freed by skb_free_datagram.
			 */
			local_control = *control;
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (local_control.final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					local_control.final = 0;
					l2cap_retransmit_all(chan, &local_control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already delivered; only the ack info is useful */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		/* NOTE(review): other event paths dereference control with
		 * no NULL check; confirm whether the guard here is needed.
		 */
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	/* Anything whose ownership didn't move on is dropped here */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6117 
/* RX state machine handler for SREJ_SENT: frames arriving while SREJ
 * recovery is in progress are queued (in-order delivery happens in
 * l2cap_rx_queued_iframes) and further SREJs are issued for new gaps.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;	/* set once skb is queued */

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			/* Deliver whatever is now in sequence */
			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll by re-requesting the newest gap */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* Anything whose ownership didn't move on is dropped here */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6260 
l2cap_finish_move(struct l2cap_chan * chan)6261 static int l2cap_finish_move(struct l2cap_chan *chan)
6262 {
6263 	BT_DBG("chan %p", chan);
6264 
6265 	chan->rx_state = L2CAP_RX_STATE_RECV;
6266 	chan->conn->mtu = chan->conn->hcon->mtu;
6267 
6268 	return l2cap_resegment(chan);
6269 }
6270 
/* RX state handler for WAIT_P: we are waiting for an S-frame with the
 * poll (P=1) bit set from the remote before resuming normal operation.
 * Any other frame in this state is a protocol error.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	/* Only a poll frame is acceptable in this state */
	if (!control->poll)
		return -EPROTO;

	/* Acknowledge everything the remote reports as received */
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	/* Answer the poll with an F=1 frame */
	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* An I-frame cannot carry P=1; only S-frames may poll */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	/* Reprocess the S-frame in the normal RECV state; the skb was not
	 * consumed here, so pass NULL to avoid double handling.
	 */
	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6308 
/* RX state handler for WAIT_F: we are waiting for a frame with the
 * final (F=1) bit set from the remote.  On receipt, resync the transmit
 * window to the remote's reqseq and return to the normal RECV state.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	/* Only a final (F=1) frame is acceptable in this state */
	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	/* Go back to normal receive processing before handling the frame */
	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;
	/* Refresh outgoing MTU from the underlying HCI connection */
	chan->conn->mtu = chan->conn->hcon->mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6342 
__valid_reqseq(struct l2cap_chan * chan,u16 reqseq)6343 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6344 {
6345 	/* Make sure reqseq is for a packet that has been sent but not acked */
6346 	u16 unacked;
6347 
6348 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6349 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6350 }
6351 
/* Top-level ERTM receive dispatcher: validate the acknowledgment field
 * and route the frame to the handler for the current RX state.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	/* An out-of-range acknowledgment is a fatal protocol error */
	if (!__valid_reqseq(chan, control->reqseq)) {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return 0;
	}

	switch (chan->rx_state) {
	case L2CAP_RX_STATE_RECV:
		err = l2cap_rx_state_recv(chan, control, skb, event);
		break;
	case L2CAP_RX_STATE_SREJ_SENT:
		err = l2cap_rx_state_srej_sent(chan, control, skb, event);
		break;
	case L2CAP_RX_STATE_WAIT_P:
		err = l2cap_rx_state_wait_p(chan, control, skb, event);
		break;
	case L2CAP_RX_STATE_WAIT_F:
		err = l2cap_rx_state_wait_f(chan, control, skb, event);
		break;
	default:
		/* shut it down */
		break;
	}

	return err;
}
6388 
/* Receive path for streaming mode: there is no retransmission, so an
 * unexpected txseq simply discards any partial SDU reassembly and the
 * frame itself, while sequence tracking keeps advancing.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
	 * the txseq field in advance to use it after l2cap_reassemble_sdu
	 * returns and to avoid the race condition, for example:
	 *
	 * The current thread calls:
	 *   l2cap_reassemble_sdu
	 *     chan->ops->recv == l2cap_sock_recv_cb
	 *       __sock_queue_rcv_skb
	 * Another thread calls:
	 *   bt_sock_recvmsg
	 *     skb_recv_datagram
	 *     skb_free_datagram
	 * Then the current thread tries to access control, but it was freed by
	 * skb_free_datagram.
	 */
	u16 txseq = control->txseq;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Unexpected txseq: throw away any partially built SDU */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Advance sequence tracking past the frame just processed */
	chan->last_acked_seq = txseq;
	chan->expected_tx_seq = __next_seq(chan, txseq);

	return 0;
}
6440 
/* Entry point for ERTM and streaming mode PDUs: unpack the control
 * field, verify FCS and payload size, then dispatch I-frames and
 * S-frames to the appropriate receive state machine.  Always consumes
 * or hands off the skb; returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* The SDU length field of a start fragment is not payload */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	/* Neither is a trailing checksum, when one is in use */
	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	/* Payload must fit within the negotiated MPS */
	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	/* Give the channel owner a chance to veto the frame */
	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the 2-bit S-frame function to a state machine event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6533 
l2cap_chan_le_send_credits(struct l2cap_chan * chan)6534 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6535 {
6536 	struct l2cap_conn *conn = chan->conn;
6537 	struct l2cap_le_credits pkt;
6538 	u16 return_credits = l2cap_le_rx_credits(chan);
6539 
6540 	if (chan->rx_credits >= return_credits)
6541 		return;
6542 
6543 	return_credits -= chan->rx_credits;
6544 
6545 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6546 
6547 	chan->rx_credits += return_credits;
6548 
6549 	pkt.cid     = cpu_to_le16(chan->scid);
6550 	pkt.credits = cpu_to_le16(return_credits);
6551 
6552 	chan->ident = l2cap_get_ident(conn);
6553 
6554 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6555 }
6556 
/* Record how much receive buffer space the channel owner has available
 * and, once connected, re-evaluate the credits granted to the peer.
 */
void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail)
{
	if (rx_avail == chan->rx_avail)
		return;

	BT_DBG("chan %p has %zd bytes avail for rx", chan, rx_avail);

	chan->rx_avail = rx_avail;

	/* Credits can only be issued once the channel is fully up */
	if (chan->state == BT_CONNECTED)
		l2cap_chan_le_send_credits(chan);
}
6569 
l2cap_ecred_recv(struct l2cap_chan * chan,struct sk_buff * skb)6570 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
6571 {
6572 	int err;
6573 
6574 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
6575 
6576 	/* Wait recv to confirm reception before updating the credits */
6577 	err = chan->ops->recv(chan, skb);
6578 
6579 	if (err < 0 && chan->rx_avail != -1) {
6580 		BT_ERR("Queueing received LE L2CAP data failed");
6581 		l2cap_send_disconn_req(chan, ECONNRESET);
6582 		return err;
6583 	}
6584 
6585 	/* Update credits whenever an SDU is received */
6586 	l2cap_chan_le_send_credits(chan);
6587 
6588 	return err;
6589 }
6590 
/* Receive one LE / enhanced credit based flow control PDU: account for
 * the consumed credit, then either start a new SDU (the first PDU of an
 * SDU carries the total SDU length) or append the fragment to the
 * reassembly in progress.  All skb freeing is handled internally.
 */
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	/* The peer must not send when it holds no credits */
	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	/* One credit is consumed per received PDU */
	chan->rx_credits--;
	BT_DBG("chan %p: rx_credits %u -> %u",
	       chan, chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU: starts with the total SDU length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Whole SDU fits in a single PDU: deliver it right away */
		if (skb->len == sdu_len)
			return l2cap_ecred_recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* append_skb_frag takes over skb; clear the local pointer so the
	 * error path below cannot free it a second time.
	 */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		/* SDU complete: hand it up and reset the reassembly state */
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6695 
/* Route an incoming data PDU to the channel identified by cid and
 * dispatch by channel mode.  l2cap_get_chan_by_scid() returns the
 * channel locked and referenced; both are released before returning.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Credit based modes manage skb freeing themselves */
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* recv() returning 0 means the skb was consumed */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() always consumes the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	/* Release the lock and reference taken by l2cap_get_chan_by_scid */
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
6761 
/* Deliver a connectionless (CID 0x0002) PDU to a listening raw/PSM
 * channel.  Only ACL links carry connectionless data; the skb is freed
 * on every path that does not hand it to chan->ops->recv().
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	/* Lookup takes a reference on the returned channel */
	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	l2cap_chan_lock(chan);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* recv() returning 0 means the skb was consumed */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6802 
l2cap_recv_frame(struct l2cap_conn * conn,struct sk_buff * skb)6803 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6804 {
6805 	struct l2cap_hdr *lh = (void *) skb->data;
6806 	struct hci_conn *hcon = conn->hcon;
6807 	u16 cid, len;
6808 	__le16 psm;
6809 
6810 	if (hcon->state != BT_CONNECTED) {
6811 		BT_DBG("queueing pending rx skb");
6812 		skb_queue_tail(&conn->pending_rx, skb);
6813 		return;
6814 	}
6815 
6816 	skb_pull(skb, L2CAP_HDR_SIZE);
6817 	cid = __le16_to_cpu(lh->cid);
6818 	len = __le16_to_cpu(lh->len);
6819 
6820 	if (len != skb->len) {
6821 		kfree_skb(skb);
6822 		return;
6823 	}
6824 
6825 	/* Since we can't actively block incoming LE connections we must
6826 	 * at least ensure that we ignore incoming data from them.
6827 	 */
6828 	if (hcon->type == LE_LINK &&
6829 	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
6830 				   bdaddr_dst_type(hcon))) {
6831 		kfree_skb(skb);
6832 		return;
6833 	}
6834 
6835 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
6836 
6837 	switch (cid) {
6838 	case L2CAP_CID_SIGNALING:
6839 		l2cap_sig_channel(conn, skb);
6840 		break;
6841 
6842 	case L2CAP_CID_CONN_LESS:
6843 		psm = get_unaligned((__le16 *) skb->data);
6844 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
6845 		l2cap_conless_channel(conn, psm, skb);
6846 		break;
6847 
6848 	case L2CAP_CID_LE_SIGNALING:
6849 		l2cap_le_sig_channel(conn, skb);
6850 		break;
6851 
6852 	default:
6853 		l2cap_data_channel(conn, cid, skb);
6854 		break;
6855 	}
6856 }
6857 
process_pending_rx(struct work_struct * work)6858 static void process_pending_rx(struct work_struct *work)
6859 {
6860 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6861 					       pending_rx_work);
6862 	struct sk_buff *skb;
6863 
6864 	BT_DBG("");
6865 
6866 	mutex_lock(&conn->lock);
6867 
6868 	while ((skb = skb_dequeue(&conn->pending_rx)))
6869 		l2cap_recv_frame(conn, skb);
6870 
6871 	mutex_unlock(&conn->lock);
6872 }
6873 
/* Get or create the L2CAP connection object for an HCI connection.
 * Returns the existing conn if one is already attached, otherwise
 * allocates and initializes a new one.  Returns NULL on allocation or
 * hci_chan creation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already set up: reuse it */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	/* Hold the hcon for as long as the conn exists */
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	conn->mtu = hcon->mtu;
	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* Advertise SMP over BR/EDR only when the controller supports it */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_DELAYED_WORK(&conn->id_addr_timer, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
6925 
is_valid_psm(u16 psm,u8 dst_type)6926 static bool is_valid_psm(u16 psm, u8 dst_type)
6927 {
6928 	if (!psm)
6929 		return false;
6930 
6931 	if (bdaddr_type_is_le(dst_type))
6932 		return (psm <= 0x00ff);
6933 
6934 	/* PSM must be odd and lsb of upper byte must be 0 */
6935 	return ((psm & 0x0101) == 0x0001);
6936 }
6937 
/* Context for l2cap_chan_by_pid(): counts how many deferred
 * EXT_FLOWCTL channels with the same owner PID and PSM are already
 * being connected, excluding the initiating channel itself.
 */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* channel being connected (excluded) */
	struct pid *pid;		/* owner PID to match against */
	int count;			/* running tally of matches */
};
6943 
l2cap_chan_by_pid(struct l2cap_chan * chan,void * data)6944 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
6945 {
6946 	struct l2cap_chan_data *d = data;
6947 	struct pid *pid;
6948 
6949 	if (chan == d->chan)
6950 		return;
6951 
6952 	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
6953 		return;
6954 
6955 	pid = chan->ops->get_peer_pid(chan);
6956 
6957 	/* Only count deferred channels with the same PID/PSM */
6958 	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
6959 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
6960 		return;
6961 
6962 	d->count++;
6963 }
6964 
/* Initiate an outgoing L2CAP channel to @dst on PSM @psm and/or fixed
 * CID @cid.  Validates the PSM/CID against the channel type and mode,
 * brings up (or reuses) the underlying ACL/LE link, attaches the
 * channel to the connection and starts the connect procedure.
 *
 * Returns 0 on success (or if a connect is already in progress) and a
 * negative errno otherwise.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type, u16 timeout)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	/* Pick the local controller that can reach dst; takes a reference */
	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* Raw channels are exempt from PSM validation */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels require a PSM */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	/* Fixed channels require a CID */
	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Reject modes that are disabled by module parameters */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising, connect directly as slave; otherwise go
		 * through the passive connect-by-scan machinery.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type, false,
					      chan->sec_level, timeout,
					      HCI_ROLE_SLAVE, 0, 0);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level, timeout,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN, timeout);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		/* Count this channel plus matching deferred siblings */
		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check if there isn't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->lock);
	l2cap_chan_lock(chan);

	/* A fixed CID must not already be in use on this connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* If the link is already up, kick off the channel setup now */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7149 
l2cap_ecred_reconfigure(struct l2cap_chan * chan)7150 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
7151 {
7152 	struct l2cap_conn *conn = chan->conn;
7153 	DEFINE_RAW_FLEX(struct l2cap_ecred_reconf_req, pdu, scid, 1);
7154 
7155 	pdu->mtu = cpu_to_le16(chan->imtu);
7156 	pdu->mps = cpu_to_le16(chan->mps);
7157 	pdu->scid[0] = cpu_to_le16(chan->scid);
7158 
7159 	chan->ident = l2cap_get_ident(conn);
7160 
7161 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
7162 		       sizeof(pdu), &pdu);
7163 }
7164 
/* Raise the channel's local MTU and notify the remote via an
 * enhanced-credit reconfigure request.  Shrinking is not allowed.
 */
int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
{
	/* The MTU may only grow */
	if (mtu < chan->imtu)
		return -EINVAL;

	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);

	chan->imtu = mtu;

	l2cap_ecred_reconfigure(chan);

	return 0;
}
7178 
7179 /* ---- L2CAP interface with lower layer (HCI) ---- */
7180 
/* Incoming ACL connection indication: compute the accept/role-switch
 * link mode from listening channels.  Channels bound to this adapter's
 * address take precedence over wildcard (BDADDR_ANY) listeners.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		int lm = HCI_LM_ACCEPT;

		if (c->state != BT_LISTEN)
			continue;

		if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
			lm |= HCI_LM_MASTER;

		if (!bacmp(&c->src, &hdev->bdaddr)) {
			lm1 |= lm;
			exact++;
		} else if (!bacmp(&c->src, BDADDR_ANY)) {
			lm2 |= lm;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}
7209 
7210 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7211  * from an existing channel in the list or from the beginning of the
7212  * global list (by passing NULL as first parameter).
7213  */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	/* Resume after the previous hit, or start from the list head */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Must be bound to this adapter's address or to any */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* Take a reference before dropping the list lock so the
		 * channel cannot disappear under the caller; may return
		 * NULL if the refcount already hit zero.
		 */
		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7245 
/* HCI connect-complete callback: set up (or tear down on failure) the
 * L2CAP connection and spawn per-connection instances of all listening
 * fixed channels.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	/* L2CAP only runs over ACL and LE links */
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	/* Link setup failed: tear down any L2CAP state for it */
	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Advance before dropping the reference on the current one */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7306 
l2cap_disconn_ind(struct hci_conn * hcon)7307 int l2cap_disconn_ind(struct hci_conn *hcon)
7308 {
7309 	struct l2cap_conn *conn = hcon->l2cap_data;
7310 
7311 	BT_DBG("hcon %p", hcon);
7312 
7313 	if (!conn)
7314 		return HCI_ERROR_REMOTE_USER_TERM;
7315 	return conn->disc_reason;
7316 }
7317 
/* HCI callback: a link went down, tear down the L2CAP connection. */
static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	/* L2CAP only runs over ACL and LE links; ignore other link types. */
	if (hcon->type == ACL_LINK || hcon->type == LE_LINK) {
		BT_DBG("hcon %p reason %d", hcon, reason);

		l2cap_conn_del(hcon, bt_to_errno(reason));
	}
}
7327 
/* React to an encryption state change on a connection-oriented channel:
 * losing encryption arms a grace timer (medium security) or closes the
 * channel outright (high/FIPS); regaining it cancels the grace timer.
 */
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return;

	if (encrypt) {
		/* Link encrypted again: stop the pending encryption timer. */
		if (chan->sec_level == BT_SECURITY_MEDIUM)
			__clear_chan_timer(chan);
		return;
	}

	switch (chan->sec_level) {
	case BT_SECURITY_MEDIUM:
		/* Allow some time for the link to re-encrypt. */
		__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
		break;
	case BT_SECURITY_HIGH:
	case BT_SECURITY_FIPS:
		/* High/FIPS channels must never run unencrypted. */
		l2cap_chan_close(chan, ECONNREFUSED);
		break;
	default:
		break;
	}
}
7344 
/* HCI callback invoked when authentication/encryption completes for a
 * link. Walks every channel on the connection and advances its state
 * machine based on the result: connected/configured channels are
 * resumed, connecting channels either start the L2CAP connect or arm a
 * disconnect timer, and BT_CONNECT2 (incoming, pending security)
 * channels get their deferred L2CAP_CONN_RSP sent.
 *
 * Holds conn->lock for the whole walk; each channel is additionally
 * locked individually around its state transition.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* Security procedure succeeded with encryption on: the
		 * channel now runs at the link's security level.
		 */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		/* Skip channels that still have a connect pending. */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Already-established channels just resume and re-check
		 * their encryption requirements.
		 */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing connect waiting on security: proceed only
			 * if security passed and the key is long enough.
			 */
			if (!status && l2cap_check_enc_key_size(hcon, chan))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			/* Incoming connect whose response was deferred until
			 * security completed (BR/EDR modes only).
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon, chan)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Socket layer must still authorize;
					 * answer "pending" for now.
					 */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: refuse and schedule
				 * disconnect.
				 */
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			/* Note the scid/dcid swap: our dcid is the peer's
			 * source CID and vice versa.
			 */
			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On success, kick off configuration immediately if
			 * we have not already sent a CONF_REQ.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
7431 
7432 /* Append fragment into frame respecting the maximum len of rx_skb */
l2cap_recv_frag(struct l2cap_conn * conn,struct sk_buff * skb,u16 len)7433 static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
7434 			   u16 len)
7435 {
7436 	if (!conn->rx_skb) {
7437 		/* Allocate skb for the complete frame (with header) */
7438 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7439 		if (!conn->rx_skb)
7440 			return -ENOMEM;
7441 		/* Init rx_len */
7442 		conn->rx_len = len;
7443 
7444 		skb_set_delivery_time(conn->rx_skb, skb->tstamp,
7445 				      skb->tstamp_type);
7446 	}
7447 
7448 	/* Copy as much as the rx_skb can hold */
7449 	len = min_t(u16, len, skb->len);
7450 	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
7451 	skb_pull(skb, len);
7452 	conn->rx_len -= len;
7453 
7454 	return len;
7455 }
7456 
/* Complete the 2-byte L2CAP length field of a partially received frame
 * and, once known, make sure conn->rx_skb is large enough to hold the
 * whole PDU. Returns the number of bytes consumed from skb, or a
 * negative errno.
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	/* Payload length announced by the peer (little-endian on the wire) */
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to received all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in order to
	 * fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length: with
	 * conn->rx_skb now NULL, l2cap_recv_frag() allocates a fresh buffer
	 * and copies the bytes gathered so far out of the old one, which is
	 * then freed below.
	 */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}
7491 
l2cap_recv_reset(struct l2cap_conn * conn)7492 static void l2cap_recv_reset(struct l2cap_conn *conn)
7493 {
7494 	kfree_skb(conn->rx_skb);
7495 	conn->rx_skb = NULL;
7496 	conn->rx_len = 0;
7497 }
7498 
l2cap_conn_hold_unless_zero(struct l2cap_conn * c)7499 struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *c)
7500 {
7501 	if (!c)
7502 		return NULL;
7503 
7504 	BT_DBG("conn %p orig refcnt %u", c, kref_read(&c->ref));
7505 
7506 	if (!kref_get_unless_zero(&c->ref))
7507 		return NULL;
7508 
7509 	return c;
7510 }
7511 
/* Entry point for ACL data from the HCI core. Reassembles L2CAP frames
 * from ACL start/continuation fragments in conn->rx_skb and hands each
 * complete frame to l2cap_recv_frame(), which takes ownership of it.
 * In every path the incoming skb is consumed (either copied into
 * rx_skb and freed at "drop", passed on whole, or dropped on error).
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn;
	int len;

	/* Lock hdev to access l2cap_data to avoid race with l2cap_conn_del */
	hci_dev_lock(hcon->hdev);

	conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	/* Only proceed if the connection is not already being torn down */
	conn = l2cap_conn_hold_unless_zero(conn);

	hci_dev_unlock(hcon->hdev);

	if (!conn) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);

	mutex_lock(&conn->lock);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start fragment while reassembly is in progress means the
		 * previous frame never completed: discard the partial frame
		 * and mark the connection unreliable.
		 */
		if (conn->rx_skb) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment may not contain the L2CAP length so just
		 * copy the initial byte when that happens and use conn->mtu as
		 * expected length.
		 */
		if (skb->len < L2CAP_LEN_SIZE) {
			l2cap_recv_frag(conn, skb, conn->mtu);
			break;
		}

		/* Total frame size = announced payload + 4-byte header */
		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			goto unlock;
		}

		BT_DBG("Start: total len %d, frag len %u", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %u, expected len %d)",
			       skb->len, len);
			/* PTS test cases L2CAP/COS/CED/BI-14-C and BI-15-C
			 * (Multiple Signaling Command in one PDU, Data
			 * Truncated, BR/EDR) send a C-frame to the IUT with
			 * PDU Length set to 8 and Channel ID set to the
			 * correct signaling channel for the logical link.
			 * The Information payload contains one L2CAP_ECHO_REQ
			 * packet with Data Length set to 0 with 0 octets of
			 * echo data and one invalid command packet due to
			 * data truncated in PDU but present in HCI packet.
			 *
			 * Shorter the socket buffer to the PDU length to
			 * allow to process valid commands from the PDU before
			 * setting the socket unreliable.
			 */
			skb->len = len;
			l2cap_recv_frame(conn, skb);
			l2cap_conn_unreliable(conn, ECOMM);
			goto unlock;
		}

		/* Append fragment into frame (with header) */
		if (l2cap_recv_frag(conn, skb, len) < 0)
			goto drop;

		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);

		/* Continuation without a pending start frame is a protocol
		 * violation from the controller or the peer.
		 */
		if (!conn->rx_skb) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Complete the L2CAP length if it has not been read */
		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
			if (l2cap_recv_len(conn, skb) < 0) {
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}

			/* Header still could not be read just continue */
			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
				break;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %u, expected %u)",
			       skb->len, conn->rx_len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		l2cap_recv_frag(conn, skb, skb->len);

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

	/* Fall-through from the switch is intentional: the fragment data has
	 * been copied into conn->rx_skb, so the incoming skb is always freed.
	 */
drop:
	kfree_skb(skb);
unlock:
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
}
7646 
/* Callbacks registered with the HCI core: link setup confirmation,
 * disconnect confirmation and security (encryption) change events.
 */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
7653 
l2cap_debugfs_show(struct seq_file * f,void * p)7654 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7655 {
7656 	struct l2cap_chan *c;
7657 
7658 	read_lock(&chan_list_lock);
7659 
7660 	list_for_each_entry(c, &chan_list, global_l) {
7661 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7662 			   &c->src, c->src_type, &c->dst, c->dst_type,
7663 			   c->state, __le16_to_cpu(c->psm),
7664 			   c->scid, c->dcid, c->imtu, c->omtu,
7665 			   c->sec_level, c->mode);
7666 	}
7667 
7668 	read_unlock(&chan_list_lock);
7669 
7670 	return 0;
7671 }
7672 
/* Generates l2cap_debugfs_fops around l2cap_debugfs_show() */
DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

/* debugfs entry created by l2cap_init(), removed by l2cap_exit() */
static struct dentry *l2cap_debugfs;
7676 
l2cap_init(void)7677 int __init l2cap_init(void)
7678 {
7679 	int err;
7680 
7681 	err = l2cap_init_sockets();
7682 	if (err < 0)
7683 		return err;
7684 
7685 	hci_register_cb(&l2cap_cb);
7686 
7687 	if (IS_ERR_OR_NULL(bt_debugfs))
7688 		return 0;
7689 
7690 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7691 					    NULL, &l2cap_debugfs_fops);
7692 
7693 	return 0;
7694 }
7695 
/* Module teardown: undo l2cap_init() in reverse order. */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
7702 
/* Runtime-tunable (mode 0644) module parameters for optional L2CAP modes;
 * both back the file-scope bools declared near the top of this file.
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
7708