xref: /linux/net/bluetooth/l2cap_core.c (revision 4003c9e78778e93188a09d6043a74f7154449d43)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 
43 #define LE_FLOWCTL_MAX_CREDITS 65535
44 
45 bool disable_ertm;
46 bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED);
47 
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49 
50 static LIST_HEAD(chan_list);
51 static DEFINE_RWLOCK(chan_list_lock);
52 
53 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
54 				       u8 code, u8 ident, u16 dlen, void *data);
55 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 			   void *data);
57 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
58 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59 
60 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
61 		     struct sk_buff_head *skbs, u8 event);
62 static void l2cap_retrans_timeout(struct work_struct *work);
63 static void l2cap_monitor_timeout(struct work_struct *work);
64 static void l2cap_ack_timeout(struct work_struct *work);
65 
/* Map an HCI link type plus address type to the socket-level BDADDR_*
 * address type.  BR/EDR links always map to BDADDR_BREDR.
 */
static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
{
	if (link_type != LE_LINK)
		return BDADDR_BREDR;

	return bdaddr_type == ADDR_LE_DEV_PUBLIC ? BDADDR_LE_PUBLIC :
						   BDADDR_LE_RANDOM;
}
77 
/* Socket-level address type of the local (source) side of @hcon. */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}
82 
/* Socket-level address type of the remote (destination) side of @hcon. */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}
87 
88 /* ---- L2CAP channels ---- */
89 
/* Look up a channel on @conn by destination CID.  No reference is
 * taken; callers must keep the channel list stable.
 */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->dcid == cid)
			return chan;
	}

	return NULL;
}
101 
/* Look up a channel on @conn by source CID.  No reference is taken;
 * callers must keep the channel list stable.
 */
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->scid == cid)
			return chan;
	}

	return NULL;
}
113 
114 /* Find channel with given SCID.
115  * Returns a reference locked channel.
116  */
l2cap_get_chan_by_scid(struct l2cap_conn * conn,u16 cid)117 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
118 						 u16 cid)
119 {
120 	struct l2cap_chan *c;
121 
122 	c = __l2cap_get_chan_by_scid(conn, cid);
123 	if (c) {
124 		/* Only lock if chan reference is not 0 */
125 		c = l2cap_chan_hold_unless_zero(c);
126 		if (c)
127 			l2cap_chan_lock(c);
128 	}
129 
130 	return c;
131 }
132 
133 /* Find channel with given DCID.
134  * Returns a reference locked channel.
135  */
l2cap_get_chan_by_dcid(struct l2cap_conn * conn,u16 cid)136 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
137 						 u16 cid)
138 {
139 	struct l2cap_chan *c;
140 
141 	c = __l2cap_get_chan_by_dcid(conn, cid);
142 	if (c) {
143 		/* Only lock if chan reference is not 0 */
144 		c = l2cap_chan_hold_unless_zero(c);
145 		if (c)
146 			l2cap_chan_lock(c);
147 	}
148 
149 	return c;
150 }
151 
/* Look up a channel on @conn by pending signalling command identifier.
 * No reference is taken.
 */
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->ident == ident)
			return chan;
	}

	return NULL;
}
163 
/* Find a channel in the global list bound to @psm on source address
 * @src.  Channels are only matched within the same transport: a BR/EDR
 * source never matches an LE channel and vice versa.
 */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
						      u8 src_type)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		/* Skip channels on the other transport */
		if ((src_type == BDADDR_BREDR) !=
		    (c->src_type == BDADDR_BREDR))
			continue;

		if (c->sport == psm && !bacmp(&c->src, src))
			return c;
	}

	return NULL;
}
181 
/* Bind @chan to a PSM on source address @src.
 *
 * A non-zero @psm is claimed as-is and fails with -EADDRINUSE if another
 * channel already owns that PSM/source-address pair.  A zero @psm requests
 * automatic allocation from the dynamic range: BR/EDR candidates step by 2
 * (preserving the odd low-octet pattern BR/EDR PSMs require), LE candidates
 * step by 1.
 *
 * Returns 0 on success, -EADDRINUSE if taken, or -EINVAL if the dynamic
 * range is exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		/* Assume failure until a free PSM is found */
		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
226 
/* Turn @chan into a fixed channel bound to source CID @scid.
 * Always returns 0.
 */
int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
241 
l2cap_alloc_cid(struct l2cap_conn * conn)242 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
243 {
244 	u16 cid, dyn_end;
245 
246 	if (conn->hcon->type == LE_LINK)
247 		dyn_end = L2CAP_CID_LE_DYN_END;
248 	else
249 		dyn_end = L2CAP_CID_DYN_END;
250 
251 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
252 		if (!__l2cap_get_chan_by_scid(conn, cid))
253 			return cid;
254 	}
255 
256 	return 0;
257 }
258 
/* Move @chan to @state and notify the channel's owner with no error. */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
267 
/* Move @chan to @state and notify the channel's owner with error @err. */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
274 
/* Report error @err to the channel's owner without changing state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
279 
/* (Re)arm the ERTM retransmission timer, but only while the monitor
 * timer is not running — the two timers are mutually exclusive in the
 * ERTM state machine.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
288 
/* Arm the ERTM monitor timer, cancelling any pending retransmission
 * timer first (they never run concurrently).
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
297 
/* Find the skb in @head carrying ERTM transmit sequence number @seq,
 * or NULL if it is not queued.
 */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *cur;

	skb_queue_walk(head, cur) {
		if (bt_cb(cur)->l2cap.txseq != seq)
			continue;

		return cur;
	}

	return NULL;
}
310 
311 /* ---- L2CAP sequence number lists ---- */
312 
313 /* For ERTM, ordered lists of sequence numbers must be tracked for
314  * SREJ requests that are received and for frames that are to be
315  * retransmitted. These seq_list functions implement a singly-linked
316  * list in an array, where membership in the list can also be checked
317  * in constant time. Items can also be added to the tail of the list
318  * and removed from the head in constant time, without further memory
319  * allocs or frees.
320  */
321 
/* Initialise a sequence list able to hold @size entries.
 * Returns 0 on success or -ENOMEM.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* Power-of-two size lets "seq & mask" index the array directly */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
344 
/* Release the backing array of a sequence list. */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
349 
/* True if @seq is currently a member of the list: a member's array
 * slot holds either the next sequence number or L2CAP_SEQ_LIST_TAIL,
 * never L2CAP_SEQ_LIST_CLEAR.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
356 
/* Remove and return the sequence number at the head of the list.
 * NOTE(review): assumes the list is non-empty — callers are expected to
 * check head != L2CAP_SEQ_LIST_CLEAR first; confirm at call sites.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the popped entry's successor and clear its slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	/* Popped the last entry: reset the list to the empty state */
	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
372 
/* Empty the list, clearing every array slot.  A no-op when already
 * empty, so this is O(1) in the common case.
 */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
386 
/* Append @seq to the tail of the list.  Duplicate appends are ignored,
 * keeping membership and ordering invariants intact.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already a member: nothing to do */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
404 
/* Delayed work run when the channel timer expires: close the channel
 * with an error code derived from the state it was stuck in.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* The channel may already be detached from its connection */
	if (!conn)
		return;

	mutex_lock(&conn->lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	/* Established/configuring channels (and unsecured connects) are
	 * reported as refused; everything else as a plain timeout.
	 */
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	/* Drop the reference taken when the timer was scheduled */
	l2cap_chan_put(chan);

	mutex_unlock(&conn->lock);
}
440 
/* Allocate and initialise a new channel in BT_OPEN state and link it
 * into the global channel list.  Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	/* Available receive buffer space is initially unknown */
	chan->rx_avail = -1;

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	chan->state = BT_OPEN;

	/* Initial reference; released via l2cap_chan_put() */
	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
480 
/* kref release callback: unlink the channel from the global list and
 * free it.  Only ever invoked via l2cap_chan_put().
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
493 
/* Take an additional reference on @c; the caller must already hold one. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
500 
l2cap_chan_hold_unless_zero(struct l2cap_chan * c)501 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
502 {
503 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
504 
505 	if (!kref_get_unless_zero(&c->kref))
506 		return NULL;
507 
508 	return c;
509 }
510 
/* Drop a reference on @c; frees it via l2cap_chan_destroy() when the
 * count reaches zero.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
518 
/* Reset @chan's negotiable parameters (FCS, ERTM windows/timeouts,
 * security, flush timeout) to their defaults and clear any previous
 * configuration state.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

	/* Configuration restarts from scratch */
	chan->conf_state = 0;
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
539 
/* Compute how many LE flow-control credits to grant the remote device,
 * based on receive buffer space and any partially reassembled SDU.
 */
static __u16 l2cap_le_rx_credits(struct l2cap_chan *chan)
{
	/* Bytes of the current SDU already received, if reassembly is
	 * in progress.
	 */
	size_t sdu_len = chan->sdu ? chan->sdu->len : 0;

	/* A zero MPS would make the divisions below meaningless */
	if (chan->mps == 0)
		return 0;

	/* If we don't know the available space in the receiver buffer, give
	 * enough credits for a full packet.
	 */
	if (chan->rx_avail == -1)
		return (chan->imtu / chan->mps) + 1;

	/* If we know how much space is available in the receive buffer, give
	 * out as many credits as would fill the buffer.
	 */
	if (chan->rx_avail <= sdu_len)
		return 0;

	return DIV_ROUND_UP(chan->rx_avail - sdu_len, chan->mps);
}
561 
/* Initialise LE credit-based flow control state on @chan, seeding the
 * peer-granted @tx_credits and computing our own receive credits.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	/* No SDU reassembly in progress yet */
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	chan->rx_credits = l2cap_le_rx_credits(chan);

	skb_queue_head_init(&chan->tx_q);
}
574 
/* Initialise enhanced-credit (ECRED) flow control: same as LE credit
 * based flow control, but with the spec-mandated minimum MPS enforced.
 */
static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
{
	l2cap_le_flowctl_init(chan, tx_credits);

	/* L2CAP implementations shall support a minimum MPS of 64 octets */
	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
		chan->mps = L2CAP_ECRED_MIN_MPS;
		/* MPS changed, so the credit computation must be redone */
		chan->rx_credits = l2cap_le_rx_credits(chan);
	}
}
585 
/* Attach @chan to @conn, assigning CIDs and default MTU according to
 * the channel type.  Takes a channel reference and — except for fixed
 * channels that did not request it — an hci_conn reference.
 * NOTE(review): callers appear to serialise via conn->lock (see
 * l2cap_chan_add()) — confirm for all call sites.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference owned by conn->chan_l; dropped in l2cap_chan_del() */
	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	/* Append to the list since the order matters for ECRED */
	list_add_tail(&chan->list, &conn->chan_l);
}
638 
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->lock);
}
645 
/* Detach @chan from its connection and tear down mode-specific state
 * (timers, queues, sequence lists), reporting @err to the owner.
 * Mirrors __l2cap_chan_add(): drops the channel reference and, when one
 * was held, the hci_conn reference.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference taken by __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);
	}

	/* Mode-specific state was never set up if config didn't complete */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
703 
/* Invoke @func on every channel of @conn whose pending command
 * identifier matches @id.  Uses the _safe iterator so @func may remove
 * the channel from the list.
 */
static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
				 l2cap_chan_func_t func, void *data)
{
	struct l2cap_chan *chan, *tmp;

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		if (chan->ident != id)
			continue;

		func(chan, data);
	}
}
714 
/* Invoke @func on every channel of @conn.  @func must not remove
 * entries (plain, non-safe iteration).
 */
static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
			      void *data)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		func(chan, data);
	}
}
724 
/* Locked wrapper around __l2cap_chan_list(); tolerates a NULL @conn. */
void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
		     void *data)
{
	if (!conn)
		return;

	mutex_lock(&conn->lock);
	__l2cap_chan_list(conn, func, data);
	mutex_unlock(&conn->lock);
}

EXPORT_SYMBOL_GPL(l2cap_chan_list);
737 
/* Deferred work: propagate the hci_conn's (possibly resolved identity)
 * destination address to every channel on the connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_timer.work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
756 
/* Send a failure response to a pending LE credit-based connection
 * request and move the channel to BT_DISCONN.  Deferred-setup channels
 * report "authorization refused", others "PSM not supported".
 */
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_LE_AUTHORIZATION;
	else
		result = L2CAP_CR_LE_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid    = cpu_to_le16(chan->scid);
	rsp.mtu     = cpu_to_le16(chan->imtu);
	rsp.mps     = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}
779 
/* Reject a pending ECRED connection: move to BT_DISCONN and let the
 * deferred ECRED response path emit the actual reply.
 */
static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
{
	l2cap_state_change(chan, BT_DISCONN);

	__l2cap_ecred_conn_rsp_defer(chan);
}
786 
/* Send a failure response to a pending BR/EDR connection request and
 * move the channel to BT_DISCONN.  Deferred-setup channels report a
 * security block, others "PSM not supported".
 */
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	/* From the remote's point of view our scid is its dcid and
	 * vice versa.
	 */
	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}
807 
/* Close @chan with @reason, choosing the teardown path appropriate to
 * its current state: established channels send a disconnect request,
 * half-open incoming channels send the proper reject, and everything
 * else is torn down locally.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Give the peer a bounded time to answer the
			 * disconnect request.
			 */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					/* ECRED reject path handles the
					 * rest of the teardown itself.
					 */
					return;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
858 
/* Translate the channel type, PSM and security level into the HCI
 * authentication requirement to request for the link.  May upgrade a
 * BT_SECURITY_LOW channel to BT_SECURITY_SDP for the SDP/3DSP PSMs as
 * a side effect.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		fallthrough;

	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
910 
/* Service level security: enforce the channel's security level on the
 * underlying link — SMP pairing for LE links, HCI authentication for
 * BR/EDR.  Returns the result of the security request.
 */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
925 
/* Allocate the next signalling command identifier for @conn.
 * Identifiers wrap from 128 back to 1; 0 is never used.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	mutex_lock(&conn->ident_lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	mutex_unlock(&conn->ident_lock);

	return id;
}
947 
/* Send @skb on @conn's ACL channel, consuming it either way: it is
 * freed instead of sent when the hci_conn is no longer valid.
 */
static void l2cap_send_acl(struct l2cap_conn *conn, struct sk_buff *skb,
			   u8 flags)
{
	/* Check if the hcon still valid before attempting to send */
	if (hci_conn_valid(conn->hcon->hdev, conn->hcon))
		hci_send_acl(conn->hchan, skb, flags);
	else
		kfree_skb(skb);
}
957 
/* Build and transmit a signalling command on @conn at maximum HCI
 * priority.  Silently drops the command if the skb cannot be built.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	l2cap_send_acl(conn, skb, flags);
}
982 
/* Transmit a data skb on @chan's ACL link, selecting flushable vs
 * non-flushable start flags from the link type and channel flags.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
1005 
/* Decode a 16-bit enhanced control field into @control.  Fields that
 * do not exist for the frame type are zeroed.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
	control->sframe = !!(enh & L2CAP_CTRL_FRAME_TYPE);

	if (control->sframe) {
		/* S-Frame: supervisory fields only */
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame: segmentation and sequence fields only */
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
		control->poll = 0;
		control->super = 0;
	}
}
1029 
/* Decode a 32-bit extended control field into the generic control struct.
 * Fields that do not exist for the decoded frame type are zeroed.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	bool sframe = ext & L2CAP_EXT_CTRL_FRAME_TYPE;

	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
	control->sframe = sframe ? 1 : 0;

	if (sframe) {
		/* S-Frame: supervisory, no SAR/TX sequence */
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame: data-bearing, no poll/supervise bits */
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
		control->poll = 0;
		control->super = 0;
	}
}
1053 
__unpack_control(struct l2cap_chan * chan,struct sk_buff * skb)1054 static inline void __unpack_control(struct l2cap_chan *chan,
1055 				    struct sk_buff *skb)
1056 {
1057 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1058 		__unpack_extended_control(get_unaligned_le32(skb->data),
1059 					  &bt_cb(skb)->l2cap);
1060 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1061 	} else {
1062 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
1063 					  &bt_cb(skb)->l2cap);
1064 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1065 	}
1066 }
1067 
__pack_extended_control(struct l2cap_ctrl * control)1068 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1069 {
1070 	u32 packed;
1071 
1072 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1073 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1074 
1075 	if (control->sframe) {
1076 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1077 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1078 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1079 	} else {
1080 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1081 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1082 	}
1083 
1084 	return packed;
1085 }
1086 
__pack_enhanced_control(struct l2cap_ctrl * control)1087 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1088 {
1089 	u16 packed;
1090 
1091 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1092 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1093 
1094 	if (control->sframe) {
1095 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1096 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1097 		packed |= L2CAP_CTRL_FRAME_TYPE;
1098 	} else {
1099 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1100 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1101 	}
1102 
1103 	return packed;
1104 }
1105 
__pack_control(struct l2cap_chan * chan,struct l2cap_ctrl * control,struct sk_buff * skb)1106 static inline void __pack_control(struct l2cap_chan *chan,
1107 				  struct l2cap_ctrl *control,
1108 				  struct sk_buff *skb)
1109 {
1110 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1111 		put_unaligned_le32(__pack_extended_control(control),
1112 				   skb->data + L2CAP_HDR_SIZE);
1113 	} else {
1114 		put_unaligned_le16(__pack_enhanced_control(control),
1115 				   skb->data + L2CAP_HDR_SIZE);
1116 	}
1117 }
1118 
__ertm_hdr_size(struct l2cap_chan * chan)1119 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1120 {
1121 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1122 		return L2CAP_EXT_HDR_SIZE;
1123 	else
1124 		return L2CAP_ENH_HDR_SIZE;
1125 }
1126 
/* Allocate and build an S-frame PDU: basic L2CAP header, 16- or 32-bit
 * control field and, if negotiated, a trailing FCS. Returns
 * ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything built so far (header + control) */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1159 
/* Build and transmit the S-frame described by @control, updating the
 * related connection state: piggy-back a pending F-bit, track whether an
 * RNR has been sent, and record the acknowledged sequence number.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A pending F-bit rides on the next non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		/* This frame acknowledges up to reqseq, so the ack timer
		 * no longer needs to fire for it.
		 */
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1197 
/* Send a Receiver Ready or, when the local side is busy, a Receiver Not
 * Ready S-frame acknowledging everything up to buffer_seq.
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));

	control.sframe = 1;
	control.poll = poll;
	control.reqseq = chan->buffer_seq;
	control.super = test_bit(CONN_LOCAL_BUSY, &chan->conn_state) ?
			L2CAP_SUPER_RNR : L2CAP_SUPER_RR;

	l2cap_send_sframe(chan, &control);
}
1216 
__l2cap_no_conn_pending(struct l2cap_chan * chan)1217 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1218 {
1219 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1220 		return true;
1221 
1222 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1223 }
1224 
/* Send an L2CAP Connection Request for @chan and mark the connect as
 * pending; the allocated ident is stored so the response can be matched.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1239 
/* Transition @chan to BT_CONNECTED, resetting configuration state and
 * notifying the channel owner. Safe to call more than once.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* No credits yet, so sending must wait until the peer
		 * grants some.
		 */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);
		break;
	}

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1265 
/* Send an LE credit-based Connection Request for @chan, initializing the
 * flow-control state first. Guarded so the request is only sent once.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Fall back to the link MTU when none was configured */
	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	l2cap_le_flowctl_init(chan, 0);

	memset(&req, 0, sizeof(req));
	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1291 
/* Working state for building an enhanced credit-based connection request
 * that aggregates deferred channels into a single PDU.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req_hdr req;
		__le16 scid[5];	/* source CIDs carried in this request */
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel initiating the request */
	struct pid *pid;		/* owner PID; deferred chans must match */
	int count;			/* number of scid[] slots filled */
};
1301 
l2cap_ecred_defer_connect(struct l2cap_chan * chan,void * data)1302 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1303 {
1304 	struct l2cap_ecred_conn_data *conn = data;
1305 	struct pid *pid;
1306 
1307 	if (chan == conn->chan)
1308 		return;
1309 
1310 	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1311 		return;
1312 
1313 	pid = chan->ops->get_peer_pid(chan);
1314 
1315 	/* Only add deferred channels with the same PID/PSM */
1316 	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1317 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1318 		return;
1319 
1320 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1321 		return;
1322 
1323 	l2cap_ecred_init(chan, 0);
1324 
1325 	/* Set the same ident so we can match on the rsp */
1326 	chan->ident = conn->chan->ident;
1327 
1328 	/* Include all channels deferred */
1329 	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
1330 
1331 	conn->count++;
1332 }
1333 
/* Send an enhanced credit-based Connection Request for @chan, pulling in
 * any other deferred channels with the same PID/PSM so they share one
 * request (and one ident). Guarded so the request is only sent once.
 */
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_conn_data data;

	/* Deferred channels are picked up by the initiating channel's
	 * request via l2cap_ecred_defer_connect() instead.
	 */
	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	memset(&data, 0, sizeof(data));
	data.pdu.req.psm     = chan->psm;
	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.req.mps     = cpu_to_le16(chan->mps);
	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.scid[0]     = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	data.count = 1;
	data.chan = chan;
	data.pid = chan->ops->get_peer_pid(chan);

	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);

	/* Request length scales with the number of CIDs collected */
	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
		       &data.pdu);
}
1366 
/* Drive channel setup on an LE link: enforce the security level first,
 * then either mark fixed channels (no PSM) ready or issue the proper
 * connection request for connecting channels.
 */
static void l2cap_le_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (!smp_conn_security(conn->hcon, chan->sec_level))
		return;

	/* No PSM means a fixed channel; it is usable immediately */
	if (!chan->psm) {
		l2cap_chan_ready(chan);
		return;
	}

	if (chan->state == BT_CONNECT) {
		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
			l2cap_ecred_connect(chan);
		else
			l2cap_le_connect(chan);
	}
}
1386 
l2cap_start_connection(struct l2cap_chan * chan)1387 static void l2cap_start_connection(struct l2cap_chan *chan)
1388 {
1389 	if (chan->conn->hcon->type == LE_LINK) {
1390 		l2cap_le_start(chan);
1391 	} else {
1392 		l2cap_send_conn_req(chan);
1393 	}
1394 }
1395 
/* Start the information request procedure (feature mask query) on @conn,
 * at most once per connection, with a timeout guarding the response.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	/* If no response arrives, l2cap_info_timeout() completes the
	 * procedure with an empty feature mask.
	 */
	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1413 
l2cap_check_enc_key_size(struct hci_conn * hcon)1414 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1415 {
1416 	/* The minimum encryption key size needs to be enforced by the
1417 	 * host stack before establishing any L2CAP connections. The
1418 	 * specification in theory allows a minimum of 1, but to align
1419 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1420 	 *
1421 	 * This check might also be called for unencrypted connections
1422 	 * that have no key size requirements. Ensure that the link is
1423 	 * actually encrypted before enforcing a key size.
1424 	 */
1425 	int min_key_size = hcon->hdev->min_enc_key_size;
1426 
1427 	/* On FIPS security level, key size must be 16 bytes */
1428 	if (hcon->sec_level == BT_SECURITY_FIPS)
1429 		min_key_size = 16;
1430 
1431 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1432 		hcon->enc_key_size >= min_key_size);
1433 }
1434 
/* Advance channel establishment as far as the current connection state
 * allows: LE links go straight to l2cap_le_start(); BR/EDR links first
 * complete the feature-mask exchange and security/key-size checks.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	/* Wait until the info response (or its timeout) has arrived;
	 * l2cap_conn_start() will retry the pending channels then.
	 */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	if (l2cap_check_enc_key_size(conn->hcon))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
1461 
/* Non-zero when @mode is supported by both the local stack (subject to
 * the disable_ertm module option) and the remote feature mask.
 */
static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
{
	u32 local_feat_mask = l2cap_feat_mask;

	if (!disable_ertm)
		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;

	if (mode == L2CAP_MODE_ERTM)
		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;

	if (mode == L2CAP_MODE_STREAMING)
		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;

	return 0x00;
}
1477 
/* Send an L2CAP Disconnection Request for @chan and move it to BT_DISCONN
 * with @err as the channel error. ERTM timers are stopped first so they
 * cannot fire during teardown.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1499 
1500 /* ---- L2CAP connections ---- */
/* Walk all channels on @conn and push each one forward: mark
 * connection-less channels ready, (re)start outgoing connects that were
 * waiting on security or the info exchange, and answer incoming connect
 * requests (BT_CONNECT2) with the appropriate response. Called once the
 * feature-mask exchange has completed (or timed out).
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Channels that insist on a mode the peer does not
			 * support cannot proceed; close them.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connect waiting for our response */
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let the owner authorize first */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only accepted channels without a config request
			 * in flight proceed to configuration now.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}
}
1576 
/* LE-link specific ready handling: kick off security for outgoing
 * connections and, as peripheral, request a connection parameter update
 * when the current interval is outside the configured range.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1609 
/* Called when the underlying link comes up: start the info exchange on
 * BR/EDR, advance every channel that was waiting for the link, run the
 * LE-specific ready work and release any queued RX frames.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed channels become ready once the feature
			 * mask exchange has completed.
			 */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process frames that arrived before the connection was ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1645 
/* Notify sockets that we cannot guarantee reliability anymore */
l2cap_conn_unreliable(struct l2cap_conn * conn,int err)1647 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1648 {
1649 	struct l2cap_chan *chan;
1650 
1651 	BT_DBG("conn %p", conn);
1652 
1653 	list_for_each_entry(chan, &conn->chan_l, list) {
1654 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1655 			l2cap_chan_set_err(chan, err);
1656 	}
1657 }
1658 
/* Info request timed out without a response: finish the procedure with
 * whatever features are already known and resume channel establishment.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	mutex_lock(&conn->lock);
	l2cap_conn_start(conn);
	mutex_unlock(&conn->lock);
}
1671 
1672 /*
1673  * l2cap_user
1674  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1675  * callback is called during registration. The ->remove callback is called
1676  * during unregistration.
1677  * An l2cap_user object can either be explicitly unregistered or when the
1678  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1679  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1680  * External modules must own a reference to the l2cap_conn object if they intend
1681  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1682  * any time if they don't.
1683  */
1684 
/* Register an external l2cap_user on @conn. Fails with -EINVAL if the
 * user is already registered and -ENODEV if the connection has already
 * been torn down. The user's ->probe callback runs under hci_dev_lock.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1722 
/* Unregister a previously registered l2cap_user from @conn. A no-op if
 * the user was never (or is no longer) registered. The user's ->remove
 * callback runs under hci_dev_lock.
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	if (list_empty(&user->list))
		goto out_unlock;

	list_del_init(&user->list);
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
1739 
/* Remove every registered l2cap_user from @conn, invoking each user's
 * ->remove callback. Caller must hold the locking described in
 * l2cap_register_user().
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del_init(&user->list);
		user->remove(conn, user);
	}
}
1750 
/* Tear down the L2CAP connection attached to @hcon: flush/cancel pending
 * RX work and timers, detach all users, kill every channel with @err and
 * drop the connection's reference. Safe if no l2cap_data is attached.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	mutex_lock(&conn->lock);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	cancel_delayed_work_sync(&conn->id_addr_timer);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel across del/close so the final put below
		 * is what may free it.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hci_chan_del(conn->hchan);
	conn->hchan = NULL;

	hcon->l2cap_data = NULL;
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
}
1804 
/* kref release callback: drop the hci_conn reference and free @conn. */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1812 
/* Take a reference on @conn and return it for call chaining. */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
1819 
/* Drop a reference on @conn; frees it via l2cap_conn_free() at zero. */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1825 
1826 /* ---- Socket interface ---- */
1827 
1828 /* Find socket with psm and source / destination bdaddr.
1829  * Returns closest match.
1830  */
/* Look up a global (listening) channel by PSM and source/destination
 * address for the given link type. Prefers an exact address match and
 * otherwise returns the closest wildcard match. The returned channel
 * carries a reference taken via l2cap_chan_hold_unless_zero(); NULL if
 * nothing matched or the candidate was already being freed.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* Channel's source address type must match the link type */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1881 
/* ERTM monitor timer expired: feed the MONITOR_TO event to the TX state
 * machine. Drops the timer's channel reference on exit, including when
 * the channel has already been disconnected.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1902 
/* ERTM retransmission timer expired: feed the RETRANS_TO event to the TX
 * state machine. Drops the timer's channel reference on exit, including
 * when the channel has already been disconnected.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1922 
/* Transmit every queued I-frame in streaming mode: assign sequence
 * numbers, pack the control field, append the FCS when negotiated and
 * send without waiting for acknowledgements.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Streaming mode never requests acknowledgements */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1958 
/* Transmit as many pending I-frames as the ERTM TX window and state
 * allow. Originals stay on tx_q for possible retransmission; clones are
 * handed to the HCI layer. Returns the number of frames sent, 0 when the
 * remote is busy, or -ENOTCONN if the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Piggy-back a pending F-bit on this I-frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; NULL means nothing left to send */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2025 
/* Retransmit the I-frames whose sequence numbers sit in retrans_list.
 *
 * Each frame is looked up in tx_q by sequence number; a writeable copy
 * is made when the queued skb is still cloned (clones are read-only).
 * The control field and FCS are refreshed in the outgoing copy before
 * sending.  Disconnects the channel if a frame exceeds max_tx retries.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the piggybacked ack and F-bit for this resend */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
						L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2103 
/* Retransmit the single frame requested by an SREJ (control->reqseq). */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2112 
/* Retransmit every unacked frame, typically in response to a REJ or a
 * poll.  Rebuilds retrans_list from the frame the peer asked for
 * (control->reqseq) up to, but not including, tx_send_head, then kicks
 * the resend machinery.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* First pass: locate the starting frame (reqseq) */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Second pass: list every sent-but-unacked frame from there */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2146 
/* Acknowledge received I-frames.
 *
 * Sends an RNR when locally busy.  Otherwise tries to piggyback the
 * ack on pending I-frames; if frames remain unacked and the receive
 * window is 3/4 full an explicit RR is sent, else the ack timer is
 * (re)armed to ack later.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;	/* threshold = 3 * ack_win */
		threshold >>= 2;		/* threshold = ack_win * 3/4 */

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2196 
/* Copy @len bytes of user data from @msg into @skb.
 *
 * The first @count bytes land in the skb's linear area; any remainder
 * is split into HCI-MTU-sized fragments chained on skb's frag_list.
 * skb->len / skb->data_len are updated to cover the fragments.
 *
 * Returns the number of bytes copied or a negative errno.  On error
 * the caller is expected to free @skb, which also frees any fragments
 * already linked.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
				   &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len  -= count;

		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2240 
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by
 * the PSM, then @len bytes of user data.  Returns the skb or an
 * ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* Linear part is capped by the HCI MTU; the rest goes to frag_list */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2272 
/* Build a basic-mode (B-frame) PDU: L2CAP header plus @len bytes of
 * user data.  Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2302 
/* Build one ERTM/streaming I-frame PDU.
 *
 * @sdulen: non-zero only for the first (SAR start) segment of a
 *          segmented SDU; adds the SDU-length field to the header.
 *
 * The control field is zero-filled here and populated at transmit time
 * (see l2cap_ertm_send()).  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	/* Reserve room for the trailing checksum if FCS is in use */
	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2356 
/* Segment an outgoing SDU into ERTM/streaming PDUs.
 *
 * Splits @len bytes from @msg into I-frames no larger than the
 * negotiated limits and queues them on @seg_queue, tagging each with
 * the proper SAR value (unsegmented, start, continue, end).  On error
 * @seg_queue is purged.  Returns 0 or a negative errno.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no SDU-length field needed */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the first segment carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2422 
/* Build one LE flow-control (K-frame) PDU.
 *
 * @sdulen: non-zero only for the first segment of an SDU; adds the
 *          SDU-length field after the basic header.
 *
 * Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2465 
/* Segment an outgoing SDU into LE flow-control PDUs.
 *
 * The first PDU carries the total SDU length and therefore has
 * L2CAP_SDULEN_SIZE less payload room than the rest.  Segments are
 * queued on @seg_queue; on error the queue is purged.  Returns 0 or a
 * negative errno.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* Later segments omit the SDU-length field, so
			 * they can carry L2CAP_SDULEN_SIZE more payload.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2501 
l2cap_le_flowctl_send(struct l2cap_chan * chan)2502 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2503 {
2504 	int sent = 0;
2505 
2506 	BT_DBG("chan %p", chan);
2507 
2508 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2509 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2510 		chan->tx_credits--;
2511 		sent++;
2512 	}
2513 
2514 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2515 	       skb_queue_len(&chan->tx_q));
2516 }
2517 
/* Send @len bytes of user data on an L2CAP channel.
 *
 * Dispatches on the channel type and mode: connectionless channels get
 * a single G-frame; LE/extended flow-control modes segment and send as
 * credits allow; basic mode sends one B-frame; ERTM/streaming modes
 * segment first and then hand the frames to the TX state machine.
 *
 * Returns the number of bytes accepted (== @len) or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have been closed while segmenting slept */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: ask the socket layer to stop the writer */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2621 
/* Send SREJ S-frames for every missing sequence number between
 * expected_tx_seq and @txseq that is not already buffered in srej_q,
 * recording each requested sequence in srej_list.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2644 
l2cap_send_srej_tail(struct l2cap_chan * chan)2645 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2646 {
2647 	struct l2cap_ctrl control;
2648 
2649 	BT_DBG("chan %p", chan);
2650 
2651 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2652 		return;
2653 
2654 	memset(&control, 0, sizeof(control));
2655 	control.sframe = 1;
2656 	control.super = L2CAP_SUPER_SREJ;
2657 	control.reqseq = chan->srej_list.tail;
2658 	l2cap_send_sframe(chan, &control);
2659 }
2660 
/* Re-send SREJs for every outstanding sequence in srej_list except
 * @txseq (which just arrived).  Each popped entry is re-appended, so
 * the list is rotated exactly once; the initial head bounds the walk.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2686 
/* Process an incoming ack (reqseq): drop every acknowledged frame from
 * tx_q, update expected_ack_seq, and stop the retransmission timer once
 * nothing is left unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or duplicate ack: nothing to do */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2718 
/* Abort the SREJ_SENT receive state: forget outstanding SREJs, drop any
 * out-of-order frames buffered in srej_q, and fall back to plain RECV.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2728 
/* ERTM TX state machine: handle @event while in the XMIT state.
 *
 * New data is queued and sent immediately; local-busy transitions send
 * RNR/ack frames; polls and retransmission timeouts send an RR/RNR
 * with the P-bit and move the channel to WAIT_F.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* Poll the peer (RR with P-bit) to resync after RNR */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2800 
/* ERTM TX state machine: handle @event while in the WAIT_F state
 * (a poll was sent; waiting for the peer's F-bit response).
 *
 * New data is queued but not transmitted.  Receiving the F-bit clears
 * the monitor timer and returns the channel to XMIT.  A monitor
 * timeout re-polls until max_tx retries are exhausted, after which the
 * channel is disconnected.
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* Poll the peer (RR with P-bit) to resync after RNR */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		fallthrough;

	case L2CAP_EV_RECV_FBIT:
		if (control && control->final) {
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			/* Fixed format specifier: was "0x2.2%x" which
			 * printed literal "0x2.2" followed by the hex value.
			 */
			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
		}
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_EV_MONITOR_TO:
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
			l2cap_send_disconn_req(chan, ECONNABORTED);
		}
		break;
	default:
		break;
	}
}
2877 
/* Dispatch a TX state-machine event to the handler for the channel's
 * current tx_state; events in any other state are silently dropped.
 */
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
	       chan, control, skbs, event, chan->tx_state);

	if (chan->tx_state == L2CAP_TX_STATE_XMIT)
		l2cap_tx_state_xmit(chan, control, skbs, event);
	else if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
		l2cap_tx_state_wait_f(chan, control, skbs, event);
	/* Any other state: ignore the event */
}
2896 
/* Feed a received frame's reqseq (and F-bit) into the TX state machine. */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2903 
/* Feed only a received frame's F-bit into the TX state machine. */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2910 
2911 /* Copy frame to all raw sockets on that connection */
l2cap_raw_recv(struct l2cap_conn * conn,struct sk_buff * skb)2912 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2913 {
2914 	struct sk_buff *nskb;
2915 	struct l2cap_chan *chan;
2916 
2917 	BT_DBG("conn %p", conn);
2918 
2919 	list_for_each_entry(chan, &conn->chan_l, list) {
2920 		if (chan->chan_type != L2CAP_CHAN_RAW)
2921 			continue;
2922 
2923 		/* Don't send frame to the channel it came from */
2924 		if (bt_cb(skb)->l2cap.chan == chan)
2925 			continue;
2926 
2927 		nskb = skb_clone(skb, GFP_KERNEL);
2928 		if (!nskb)
2929 			continue;
2930 		if (chan->ops->recv(chan, nskb))
2931 			kfree_skb(nskb);
2932 	}
2933 }
2934 
2935 /* ---- L2CAP signalling commands ---- */
/* ---- L2CAP signalling commands ---- */

/* Build a signalling command PDU: L2CAP header (signalling CID chosen
 * by link type) + command header + @dlen bytes of @data.  Payload that
 * does not fit in the first HCI-MTU-sized skb is chained as fragments
 * on the frag_list.  Returns the skb or NULL on failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	/* MTU must at least fit both headers */
	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees any fragments already chained on skb as well */
	kfree_skb(skb);
	return NULL;
}
3001 
/* Parse one configuration option at *ptr and advance *ptr past it.
 *
 * Fills in the option's @type, length (@olen) and value (@val); for
 * lengths other than 1/2/4 the value is a pointer to the raw bytes.
 * Returns the total size consumed (header + value).
 *
 * NOTE(review): no bounds check against the remaining buffer is done
 * here — callers appear responsible for validating opt->len fits the
 * request; confirm at each call site.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer to the bytes */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3035 
/* Append one configuration option at *ptr and advance *ptr past it.
 *
 * @val is stored little-endian for lengths 1/2/4; for other lengths it
 * is interpreted as a pointer and @len bytes are copied.  Silently does
 * nothing if the option would not fit in the remaining @size bytes.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: val is a pointer to the bytes */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3068 
/* Append an Extended Flow Specification option for ERTM or streaming
 * mode channels; a no-op for any other mode.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id		= chan->local_id;
		efs.stype	= chan->local_stype;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id		= 1;
		efs.stype	= L2CAP_SERV_BESTEFFORT;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= 0;
		efs.flush_to	= 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs, size);
}
3099 
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * are still unacknowledged, send an RR/RNR to ack them.  Drops the
 * channel reference taken when the timer was armed.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3119 
/* Reset per-channel sequence state and queues before (re)starting data
 * transfer.  For ERTM mode also allocates the SREJ and retransmission
 * sequence lists.  Returns 0 on success or a negative errno if a list
 * allocation fails; on partial failure the first list is freed again.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Basic/streaming modes need no further state */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Don't leak the srej list on partial failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3155 
/* Pick the channel mode to use: keep ERTM/streaming only when the
 * remote's feature mask says it is supported, otherwise fall back to
 * basic mode.
 */
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
{
	bool reliable = mode == L2CAP_MODE_STREAMING ||
			mode == L2CAP_MODE_ERTM;

	if (reliable && l2cap_mode_supported(mode, remote_feat_mask))
		return mode;

	return L2CAP_MODE_BASIC;
}
3168 
__l2cap_ews_supported(struct l2cap_conn * conn)3169 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3170 {
3171 	return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW);
3172 }
3173 
__l2cap_efs_supported(struct l2cap_conn * conn)3174 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3175 {
3176 	return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW);
3177 }
3178 
/* Fill in the default ERTM retransmission and monitor timeouts in the
 * RFC option (little-endian wire format).
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
}
3185 
l2cap_txwin_setup(struct l2cap_chan * chan)3186 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3187 {
3188 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3189 	    __l2cap_ews_supported(chan->conn)) {
3190 		/* use extended control field */
3191 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3192 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3193 	} else {
3194 		chan->tx_win = min_t(u16, chan->tx_win,
3195 				     L2CAP_DEFAULT_TX_WINDOW);
3196 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3197 	}
3198 	chan->ack_win = chan->tx_win;
3199 }
3200 
l2cap_mtu_auto(struct l2cap_chan * chan)3201 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3202 {
3203 	struct hci_conn *conn = chan->conn->hcon;
3204 
3205 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3206 
3207 	/* The 2-DH1 packet has between 2 and 56 information bytes
3208 	 * (including the 2-byte payload header)
3209 	 */
3210 	if (!(conn->pkt_type & HCI_2DH1))
3211 		chan->imtu = 54;
3212 
3213 	/* The 3-DH1 packet has between 2 and 85 information bytes
3214 	 * (including the 2-byte payload header)
3215 	 */
3216 	if (!(conn->pkt_type & HCI_3DH1))
3217 		chan->imtu = 83;
3218 
3219 	/* The 2-DH3 packet has between 2 and 369 information bytes
3220 	 * (including the 2-byte payload header)
3221 	 */
3222 	if (!(conn->pkt_type & HCI_2DH3))
3223 		chan->imtu = 367;
3224 
3225 	/* The 3-DH3 packet has between 2 and 554 information bytes
3226 	 * (including the 2-byte payload header)
3227 	 */
3228 	if (!(conn->pkt_type & HCI_3DH3))
3229 		chan->imtu = 552;
3230 
3231 	/* The 2-DH5 packet has between 2 and 681 information bytes
3232 	 * (including the 2-byte payload header)
3233 	 */
3234 	if (!(conn->pkt_type & HCI_2DH5))
3235 		chan->imtu = 679;
3236 
3237 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3238 	 * (including the 2-byte payload header)
3239 	 */
3240 	if (!(conn->pkt_type & HCI_3DH5))
3241 		chan->imtu = 1021;
3242 }
3243 
/* Build an outgoing Configure Request for @chan into @data (at most
 * @data_size bytes).  On the first request the channel mode may still
 * be (re)selected; then the MTU, RFC, EFS, EWS and FCS options are
 * appended as applicable.  Returns the number of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Only (re)negotiate the mode on the very first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* CONF_STATE2_DEVICE: the mode is mandated, no fallback */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		/* Fall back to a mode the remote actually supports */
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		/* imtu == 0 means "derive from supported packet types" */
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only send an explicit basic-mode RFC option when the
		 * remote understands the RFC option at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* PDU must fit in the ACL MTU with the extended header,
		 * SDU length field and FCS accounted for.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		/* The full (extended) window travels in its own option
		 * when the extended control field is in use.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3369 
/* Parse the remote's Configure Request (saved in chan->conf_req) and
 * build our Configure Response into @data (at most @data_size bytes).
 * Returns the response length, or -ECONNREFUSED when the requested
 * configuration cannot be accepted at all.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: collect every option the remote sent.  Options
	 * with a wrong length are silently ignored.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Extended window size is refused on this path */
			return -ECONNREFUSED;

		default:
			/* Hint options may be ignored; any other unknown
			 * option must be echoed back as unknown.
			 */
			if (hint)
				break;
			result = L2CAP_CONF_UNKNOWN;
			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			/* Mode is negotiable: adopt what both sides support */
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* Mode is mandated; the remote must match it */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Second round of mode negotiation failed: give up */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			/* Service types must match unless one side carries
			 * no traffic.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote's PDU size to what fits in our
			 * ACL MTU after headers and FCS.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (remote_efs &&
			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				/* Record the remote's accepted flow spec */
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3589 
/* Parse the remote's Configure Response @rsp (@len bytes) and build the
 * follow-up Configure Request into @data (at most @size bytes) with the
 * adjusted options.  *result carries the response result code in and
 * may be updated (e.g. set unacceptable for a too-small MTU).  Returns
 * the new request length or -ECONNREFUSED.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				/* Proposed MTU below spec minimum: mark the
				 * result unacceptable and insist on the
				 * minimum in the next request.
				 */
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* A mandated mode cannot be renegotiated */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Shrink our ack window to the remote's limit but
			 * keep requesting our own tx window.
			 */
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			/* Incompatible service type: refuse the channel */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* The remote may not force a non-basic mode on a basic channel */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			/* Without extended control the RFC tx window caps
			 * the ack window; with it the EWS option does.
			 */
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}
3707 
/* Build a bare Configure Response (no options) with the given result
 * and continuation @flags.  Returns the number of bytes written, i.e.
 * just the fixed response header.
 */
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
				u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("chan %p", chan);

	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(flags);

	/* No options appended, so ptr still points right after the header */
	return ptr - data;
}
3722 
__l2cap_le_connect_rsp_defer(struct l2cap_chan * chan)3723 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3724 {
3725 	struct l2cap_le_conn_rsp rsp;
3726 	struct l2cap_conn *conn = chan->conn;
3727 
3728 	BT_DBG("chan %p", chan);
3729 
3730 	rsp.dcid    = cpu_to_le16(chan->scid);
3731 	rsp.mtu     = cpu_to_le16(chan->imtu);
3732 	rsp.mps     = cpu_to_le16(chan->mps);
3733 	rsp.credits = cpu_to_le16(chan->rx_credits);
3734 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3735 
3736 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3737 		       &rsp);
3738 }
3739 
/* Per-channel readiness check for a deferred ecred connect response.
 * @data points at an int accumulator: it is bumped for every channel
 * still awaiting user accept, left alone for connected channels, and
 * set to -ECONNREFUSED if any channel ended up in another state.
 */
static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
{
	int *result = data;

	/* Already failed, or this channel is an outgoing request: skip */
	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	if (chan->state == BT_CONNECT2) {
		/* Channel still pending accept: count it */
		(*result)++;
	} else if (chan->state != BT_CONNECTED) {
		/* Neither connected nor pending accept: it was refused */
		*result = -ECONNREFUSED;
	}
}
3760 
/* Scratch state used while aggregating a single enhanced-credit-based
 * connection response that covers several channels sharing one ident.
 */
struct l2cap_ecred_rsp_data {
	struct {
		struct l2cap_ecred_conn_rsp_hdr rsp;
		/* per-channel local CIDs appended after the fixed header */
		__le16 scid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	int count;	/* number of scid[] entries filled in so far */
};
3768 
/* Per-channel step when building the aggregated ecred connect response:
 * append this channel's local CID to the PDU, or delete the channel when
 * the overall result is a failure.
 */
static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_rsp_data *rsp = data;
	/* Recover the flexible-array view of the response so the trailing
	 * dcid[] entries can be written after the fixed header.
	 */
	struct l2cap_ecred_conn_rsp *rsp_flex =
		container_of(&rsp->pdu.rsp, struct l2cap_ecred_conn_rsp, hdr);

	/* Check if channel for outgoing connection or if it wasn't deferred
	 * since in those cases it must be skipped.
	 */
	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags) ||
	    !test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	/* Reset ident so only one response is sent */
	chan->ident = 0;

	/* Include all channels pending with the same ident */
	if (!rsp->pdu.rsp.result)
		rsp_flex->dcid[rsp->count++] = cpu_to_le16(chan->scid);
	else
		l2cap_chan_del(chan, ECONNRESET);
}
3791 
/* Send the deferred response for an enhanced-credit-based connection
 * request, covering every channel created under the same ident.
 * Nothing is sent while at least one channel is still pending user
 * accept; if any channel was refused, the whole request is answered
 * with an authorization failure.
 */
void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_rsp_data data;
	u16 id = chan->ident;
	int result = 0;

	/* ident == 0 means the response was already sent */
	if (!id)
		return;

	BT_DBG("chan %p id %d", chan, id);

	memset(&data, 0, sizeof(data));

	data.pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.rsp.mps     = cpu_to_le16(chan->mps);
	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);

	/* Verify that all channels are ready */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);

	/* result > 0: some channel still in BT_CONNECT2, wait for it */
	if (result > 0)
		return;

	/* result < 0: at least one channel was refused */
	if (result < 0)
		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);

	/* Build response */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);

	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
		       &data.pdu);
}
3827 
__l2cap_connect_rsp_defer(struct l2cap_chan * chan)3828 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3829 {
3830 	struct l2cap_conn_rsp rsp;
3831 	struct l2cap_conn *conn = chan->conn;
3832 	u8 buf[128];
3833 	u8 rsp_code;
3834 
3835 	rsp.scid   = cpu_to_le16(chan->dcid);
3836 	rsp.dcid   = cpu_to_le16(chan->scid);
3837 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3838 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3839 	rsp_code = L2CAP_CONN_RSP;
3840 
3841 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3842 
3843 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3844 
3845 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3846 		return;
3847 
3848 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3849 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3850 	chan->num_conf_req++;
3851 }
3852 
/* Extract the final RFC (and extended window size) parameters from a
 * Configure Response @rsp of @len bytes and apply them to @chan.  Only
 * meaningful for ERTM/streaming channels; other modes return early.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	/* Pull out the RFC and EWS options, ignoring malformed lengths */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* With extended control the ack window comes from EWS,
		 * otherwise from the RFC tx window.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
3908 
/* Handle an incoming Command Reject.  A "not understood" reject of our
 * outstanding information request ends feature-mask discovery early and
 * lets queued channels proceed with connection setup anyway.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
		return 0;

	/* Does this reject answer our pending information request? */
	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
	    cmd->ident == conn->info_ident) {
		cancel_delayed_work(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
3933 
/* Handle an incoming BR/EDR connection request: look up a listening
 * channel for the PSM, run security checks, create the new channel and
 * send a connect response with code @rsp_code.  May also kick off
 * feature-mask discovery and the first Configure Request.
 */
static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
			  u8 *data, u8 rsp_code)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			/* Userspace may want to authorize the connection
			 * first (FLAG_DEFER_SETUP).
			 */
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not known yet: answer "pending" for now */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* No listening channel was found: nothing locked, nothing to drop */
	if (!pchan)
		return;

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Start feature-mask discovery on this connection */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);
}
4063 
/* Validate the command length and dispatch an incoming Connection
 * Request to the common connect handler.
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP);
	return 0;
}
4073 
/* Handle a Connect (or Create Channel) Response: locate the matching
 * local channel by scid or ident, then advance it to BT_CONFIG on
 * success, mark it pending, or tear it down on refusal.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	/* On success the remote CID must fall in the dynamic range */
	if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EBADSLT;
	} else {
		/* No scid echoed back: fall back to matching on ident */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EBADSLT;
	}

	/* Guard against the channel's refcount dropping to zero under us */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Refuse a dcid that is already in use on this link */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only ever send the first configure request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result code means the connection was refused */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return err;
}
4151 
set_default_fcs(struct l2cap_chan * chan)4152 static inline void set_default_fcs(struct l2cap_chan *chan)
4153 {
4154 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4155 	 * sides request it.
4156 	 */
4157 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4158 		chan->fcs = L2CAP_FCS_NONE;
4159 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4160 		chan->fcs = L2CAP_FCS_CRC16;
4161 }
4162 
/* Emit a successful L2CAP_CONF_RSP for a channel whose locally-pending
 * configuration has just been resolved, marking the outgoing side of
 * the configuration as complete.
 */
static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
{
	struct l2cap_conn *conn = chan->conn;
	u16 len;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
	       flags);

	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	len = l2cap_build_conf_rsp(chan, data, L2CAP_CONF_SUCCESS, flags);
	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP, len, data);
}
4178 
/* Send an L2CAP_COMMAND_REJ with reason "invalid CID", echoing the
 * offending source/destination CID pair back to the peer.
 */
static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
				   u16 scid, u16 dcid)
{
	struct l2cap_cmd_rej_cid rej = {
		.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID),
		.scid   = __cpu_to_le16(scid),
		.dcid   = __cpu_to_le16(dcid),
	};

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
4190 
/* Handle an incoming L2CAP_CONF_REQ for a BR/EDR channel.  Option data
 * may arrive split across several requests (continuation flag set);
 * fragments are accumulated in chan->conf_req until the final one
 * arrives, then the whole set is parsed and answered in one response.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Lookup returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* BT_CONNECTED is accepted here as well, allowing configuration
	 * of an already-established channel.
	 */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		/* Unparseable options: give up on the channel */
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	/* Bounded counter of configuration responses sent */
	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
		chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own configuration request has not gone out yet; send it now */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
	}

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4299 
/* Handle an incoming L2CAP_CONF_RSP for a BR/EDR channel.  A success
 * result records the negotiated options; "unknown"/"unacceptable"
 * results trigger a bounded renegotiation; anything else (or exceeding
 * the retry limit) disconnects the channel.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	/* len can be negative here, but the -EPROTO check below runs
	 * before it is ever used.
	 */
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Lookup returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		/* Record the options the remote accepted */
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		/* If our own response is also pending, resolve it now */
		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		/* Renegotiate, but only while under the retry limit;
		 * otherwise fall through and give up.
		 */
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		/* Unrecoverable: flag the error and disconnect */
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More option fragments are coming; wait for the final response */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4406 
/* Handle an incoming L2CAP_DISCONN_REQ: acknowledge with an
 * L2CAP_DISCONN_RSP and tear the matching channel down.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid; lookup returns the channel locked
	 * and with a reference held.
	 */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	/* Echo the CID pair back from the channel's point of view */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	/* Shut down, remove and close in this order before releasing */
	chan->ops->set_shutdown(chan);

	l2cap_chan_del(chan, ECONNRESET);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4445 
/* Handle an incoming L2CAP_DISCONN_RSP: finish tearing down a channel
 * for which we previously sent a disconnect request.  A response for a
 * channel that is not in BT_DISCONN state is silently ignored.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	struct l2cap_chan *chan;
	u16 dcid, scid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* Lookup returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	/* Only tear the channel down if we actually initiated the
	 * disconnect; a stray response leaves the channel untouched.
	 */
	if (chan->state == BT_DISCONN) {
		l2cap_chan_del(chan, 0);
		chan->ops->close(chan);
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4482 
/* Handle an incoming L2CAP_INFO_REQ: answer feature-mask and
 * fixed-channel queries, and report "not supported" for anything else.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	switch (type) {
	case L2CAP_IT_FEAT_MASK: {
		/* 4-byte header plus 4-byte feature mask */
		u8 buf[8];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		u32 feat_mask = l2cap_feat_mask;

		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
		break;
	}
	case L2CAP_IT_FIXED_CHAN: {
		/* 4-byte header plus 8-byte fixed channel bitmap */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		rsp->data[0] = conn->local_fixed_chan;
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
		break;
	}
	default: {
		struct l2cap_info_rsp rsp;

		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
		break;
	}
	}

	return 0;
}
4530 
/* Handle an L2CAP_INFO_RSP during the connection's feature discovery
 * handshake.  Results are cached on the connection; once discovery is
 * complete (or fails), pending channels are kicked off via
 * l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	/* A failed query still completes discovery */
	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* If the peer supports fixed channels, follow up with a
		 * fixed-channel query; otherwise discovery is done.
		 */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4593 
/* Handle an LE Connection Parameter Update Request.  Only valid when we
 * are the central; the parameters are validated, a response is sent,
 * and accepted parameters are pushed down to the controller and
 * reported to the management interface.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	struct hci_conn *hcon = conn->hcon;
	u16 min, max, latency, to_multiplier;
	int err;

	/* Only the central may be asked to update connection parameters */
	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min           = __le16_to_cpu(req->min);
	max           = __le16_to_cpu(req->max);
	latency       = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	err = hci_check_conn_params(min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));
	rsp.result = cpu_to_le16(err ? L2CAP_CONN_PARAM_REJECTED :
				       L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint = hci_le_conn_update(hcon, min, max, latency,
						   to_multiplier);

		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);
	}

	return 0;
}
4643 
/* Handle an LE credit-based connection response for a channel we
 * initiated.  Success stores the remote's parameters and readies the
 * channel; authentication/encryption failures raise our security level
 * and retry via SMP; anything else tears the channel down.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	/* On success MTU/MPS must meet the LE minimum (23) and the dcid
	 * must fall in the LE dynamic CID range.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	/* The channel has no dcid yet; match it by the request ident */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Reject a dcid that is already bound to another channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Raise our security level one step above the link's */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

	return err;
}
4723 
/* Dispatch a single BR/EDR signaling command to its handler.  Request
 * handlers propagate a negative return value (which triggers a command
 * reject further up); response handlers are best-effort and their
 * results are discarded.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	/* Requests: errors are propagated */
	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	/* Responses and rejects: handled best-effort */
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the request payload back verbatim */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		/* Nothing to do */
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
4782 
/* Handle an incoming LE credit-based connection request: validate the
 * parameters, find a listening channel for the PSM, create and add the
 * new channel, then either defer (DEFER_SETUP) or respond immediately.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);
	psm  = req->psm;
	/* dcid/credits stay 0 in the response on any failure path */
	dcid = 0;
	credits = 0;

	/* LE minimum for both MTU and MPS is 23 */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* The link must already satisfy the listener's security level */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	/* Populate the new channel from the link and the request */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	/* These go back to the peer in the response */
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

	/* Deferred setup: the response is sent later by the owner */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
4919 
/* Handle an incoming L2CAP_LE_CREDITS packet: add the granted credits
 * to the channel's transmit budget and resume sending.  A grant that
 * would overflow LE_FLOWCTL_MAX_CREDITS disconnects the channel.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt = (struct l2cap_le_credits *) data;
	struct l2cap_chan *chan;
	u16 cid, credits, room;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	cid     = __le16_to_cpu(pkt->cid);
	credits = __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	/* The peer must never push us above LE_FLOWCTL_MAX_CREDITS */
	room = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > room) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		goto unlock;
	}

	chan->tx_credits += credits;

	/* Resume sending */
	l2cap_le_flowctl_send(chan);

	if (chan->tx_credits)
		chan->ops->resume(chan);

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4966 
/* Handle an incoming enhanced credit-based (ECRED) connection request,
 * which may ask for up to L2CAP_ECRED_MAX_CID channels at once.  Each
 * requested scid is processed independently; per-channel failures set
 * the shared result code and leave that dcid slot as 0 in the response.
 */
static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_req *req = (void *) data;
	DEFINE_RAW_FLEX(struct l2cap_ecred_conn_rsp, pdu, dcid, L2CAP_ECRED_MAX_CID);
	struct l2cap_chan *chan, *pchan;
	u16 mtu, mps;
	__le16 psm;
	u8 result, len = 0;
	int i, num_scid;
	bool defer = false;

	if (!enable_ecred)
		return -EINVAL;

	/* The payload after the fixed header must be a whole number of
	 * 16-bit scids.
	 */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	if (num_scid > L2CAP_ECRED_MAX_CID) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
		goto response;
	}

	psm  = req->psm;

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);

	memset(pdu, 0, sizeof(*pdu));

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* The link must already satisfy the listener's security level */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		goto unlock;
	}

	result = L2CAP_CR_LE_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid = __le16_to_cpu(req->scid[i]);

		BT_DBG("scid[%d] 0x%4.4x", i, scid);

		/* Default: this slot is refused (dcid 0) unless the
		 * channel below is created successfully.
		 */
		pdu->dcid[i] = 0x0000;
		len += sizeof(*pdu->dcid);

		/* Check for valid dynamic CID range */
		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
			result = L2CAP_CR_LE_INVALID_SCID;
			continue;
		}

		/* Check if we already have channel with that dcid */
		if (__l2cap_get_chan_by_dcid(conn, scid)) {
			result = L2CAP_CR_LE_SCID_IN_USE;
			continue;
		}

		chan = pchan->ops->new_connection(pchan);
		if (!chan) {
			result = L2CAP_CR_LE_NO_MEM;
			continue;
		}

		bacpy(&chan->src, &conn->hcon->src);
		bacpy(&chan->dst, &conn->hcon->dst);
		chan->src_type = bdaddr_src_type(conn->hcon);
		chan->dst_type = bdaddr_dst_type(conn->hcon);
		chan->psm  = psm;
		chan->dcid = scid;
		chan->omtu = mtu;
		chan->remote_mps = mps;

		__l2cap_chan_add(conn, chan);

		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));

		/* Init response */
		if (!pdu->credits) {
			pdu->mtu = cpu_to_le16(chan->imtu);
			pdu->mps = cpu_to_le16(chan->mps);
			pdu->credits = cpu_to_le16(chan->rx_credits);
		}

		pdu->dcid[i] = cpu_to_le16(chan->scid);

		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

		chan->ident = cmd->ident;
		chan->mode = L2CAP_MODE_EXT_FLOWCTL;

		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
			l2cap_state_change(chan, BT_CONNECT2);
			defer = true;
			chan->ops->defer(chan);
		} else {
			l2cap_chan_ready(chan);
		}
	}

unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

response:
	pdu->result = cpu_to_le16(result);

	/* Deferred setup: the response is sent later by the owner */
	if (defer)
		return 0;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
		       sizeof(*pdu) + len, pdu);

	return 0;
}
5117 
/* Handle an enhanced credit-based (ECRED) connection response for a
 * multi-channel request we initiated.  Each channel created under the
 * request's ident consumes one dcid from the response, in order; the
 * shared result code decides whether channels become ready, retry with
 * higher security, or are deleted.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	/* cmd_len now tracks the remaining dcid bytes in the response */
	cmd_len -= sizeof(*rsp);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		/* Only channels still pending under this request's ident */
		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Raise security one step above the link's level */
			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Accepted: store remote parameters and go live */
			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	return err;
}
5227 
/* Handle an L2CAP_CREDIT_BASED_RECONFIGURE_REQ (enhanced credit based
 * flow control mode).
 *
 * The peer asks to change MTU/MPS for one or more of its channels,
 * identified by their source CIDs (our destination CIDs).  A single
 * response with one result code covers all listed channels.
 *
 * Returns 0 after sending a response, -EINVAL when ECRED support is
 * disabled, or -EPROTO on a reserved (zero) SCID.
 */
static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					 u8 *data)
{
	struct l2cap_ecred_reconf_req *req = (void *) data;
	struct l2cap_ecred_reconf_rsp rsp;
	u16 mtu, mps, result;
	struct l2cap_chan *chan;
	int i, num_scid;

	if (!enable_ecred)
		return -EINVAL;

	/* The fixed part of the request must be present and the remainder
	 * must be a whole number of 16-bit SCIDs.  The modulo has to apply
	 * to the difference: previously '%' bound to sizeof(*req) alone
	 * (which is a multiple of sizeof(u16), hence 0), so the test
	 * degenerated to 'cmd_len != 0' and rejected every request.
	 */
	if (cmd_len < sizeof(*req) ||
	    (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto respond;
	}

	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);

	BT_DBG("mtu %u mps %u", mtu, mps);

	if (mtu < L2CAP_ECRED_MIN_MTU) {
		result = L2CAP_RECONF_INVALID_MTU;
		goto respond;
	}

	if (mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_RECONF_INVALID_MPS;
		goto respond;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);
	result = L2CAP_RECONF_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid;

		scid = __le16_to_cpu(req->scid[i]);
		if (!scid)
			return -EPROTO;

		/* The peer's source CID is our destination CID */
		chan = __l2cap_get_chan_by_dcid(conn, scid);
		if (!chan)
			continue;

		/* If the MTU value is decreased for any of the included
		 * channels, then the receiver shall disconnect all
		 * included channels.
		 */
		if (chan->omtu > mtu) {
			BT_ERR("chan %p decreased MTU %u -> %u", chan,
			       chan->omtu, mtu);
			result = L2CAP_RECONF_INVALID_MTU;
		}

		chan->omtu = mtu;
		chan->remote_mps = mps;
	}

respond:
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
		       &rsp);

	return 0;
}
5298 
/* Handle an L2CAP_CREDIT_BASED_RECONFIGURE_RSP (enhanced credit based
 * flow control mode).
 *
 * The response PDU carries only a 16-bit result code.  On failure, tear
 * down every channel still tied to this command identifier.
 */
static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					 u8 *data)
{
	struct l2cap_chan *chan, *tmp;
	/* Use the reconfigure-response layout: parsing this PDU through
	 * struct l2cap_ecred_conn_rsp made the length check demand the
	 * larger connection-response fixed part (rejecting every valid
	 * 2-byte PDU) and read 'result' from the wrong offset.
	 */
	struct l2cap_ecred_reconf_rsp *rsp = (void *) data;
	u16 result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	result = __le16_to_cpu(rsp->result);

	/* Log the host-order value, not the raw little-endian field */
	BT_DBG("result 0x%4.4x", result);

	/* Nothing to do on success */
	if (!result)
		return 0;

	/* _safe iteration: l2cap_chan_del() unlinks chan from conn->chan_l */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		if (chan->ident != cmd->ident)
			continue;

		l2cap_chan_del(chan, ECONNRESET);
	}

	return 0;
}
5326 
/* Handle an L2CAP Command Reject received on the LE signaling channel.
 *
 * A reject of one of our pending requests means the peer will never
 * answer it, so find the channel waiting under that identifier and tear
 * it down as refused.
 *
 * Returns 0, or -EPROTO when the PDU is too short.
 */
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (chan)
		/* Take a reference unless the channel is already dying */
		chan = l2cap_chan_hold_unless_zero(chan);

	if (chan) {
		l2cap_chan_lock(chan);
		l2cap_chan_del(chan, ECONNREFUSED);
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	return 0;
}
5353 
/* Dispatch one LE signaling command to its handler.
 *
 * Handlers whose result must never trigger a Command Reject are invoked
 * for their side effects only and yield 0 here; every other handler's
 * error is propagated so the caller can send a reject.  Unknown opcodes
 * yield -EINVAL.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		return 0;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		return l2cap_conn_param_update_req(conn, cmd, cmd_len, data);

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		/* Intentionally ignored */
		return 0;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		return 0;

	case L2CAP_LE_CONN_REQ:
		return l2cap_le_connect_req(conn, cmd, cmd_len, data);

	case L2CAP_LE_CREDITS:
		return l2cap_le_credits(conn, cmd, cmd_len, data);

	case L2CAP_ECRED_CONN_REQ:
		return l2cap_ecred_conn_req(conn, cmd, cmd_len, data);

	case L2CAP_ECRED_CONN_RSP:
		return l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);

	case L2CAP_ECRED_RECONF_REQ:
		return l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);

	case L2CAP_ECRED_RECONF_RSP:
		return l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);

	case L2CAP_DISCONN_REQ:
		return l2cap_disconnect_req(conn, cmd, cmd_len, data);

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		return 0;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		return -EINVAL;
	}
}
5416 
/* Receive path for the LE signaling channel.
 *
 * An LE signaling frame carries exactly one command, so the header's
 * length field must match the remaining payload exactly.  Consumes the
 * skb in all cases.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	/* LE signaling is only valid on an LE link */
	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* Ident 0 is reserved; a length mismatch means a malformed PDU */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): historical message — err comes from command
		 * handling here, not from a link-type check.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5457 
/* Send an L2CAP Command Reject (reason: command not understood) for the
 * given request identifier.
 */
static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
{
	struct l2cap_cmd_rej_unk rej = {
		.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD),
	};

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
5465 
/* Receive path for the BR/EDR signaling channel.
 *
 * A single ACL signaling frame may contain several commands back to
 * back, so parse in a loop.  Malformed commands are answered with a
 * Command Reject where possible.  Consumes the skb in all cases.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	/* Give raw listeners a look at the frame first */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		/* Reject a command that claims more payload than remains,
		 * or that uses the reserved ident 0; skip what we can and
		 * try to resynchronize on the next command.
		 */
		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			l2cap_sig_send_rej(conn, cmd->ident);
			skb_pull(skb, len > skb->len ? skb->len : len);
			continue;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			/* NOTE(review): historical message — err comes from
			 * command handling, not from a link-type check.
			 */
			BT_ERR("Wrong link type (%d)", err);
			l2cap_sig_send_rej(conn, cmd->ident);
		}

		skb_pull(skb, len);
	}

	/* Trailing bytes too short to form a command header */
	if (skb->len > 0) {
		BT_DBG("corrupted command");
		l2cap_sig_send_rej(conn, 0);
	}

drop:
	kfree_skb(skb);
}
5513 
l2cap_check_fcs(struct l2cap_chan * chan,struct sk_buff * skb)5514 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
5515 {
5516 	u16 our_fcs, rcv_fcs;
5517 	int hdr_size;
5518 
5519 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5520 		hdr_size = L2CAP_EXT_HDR_SIZE;
5521 	else
5522 		hdr_size = L2CAP_ENH_HDR_SIZE;
5523 
5524 	if (chan->fcs == L2CAP_FCS_CRC16) {
5525 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5526 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5527 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5528 
5529 		if (our_fcs != rcv_fcs)
5530 			return -EBADMSG;
5531 	}
5532 	return 0;
5533 }
5534 
/* Make sure the F-bit reaches the peer: send RNR if we are locally
 * busy, otherwise try to piggyback it on pending I-frames, and fall
 * back to an RR S-frame if nothing else carried it.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Leaving remote-busy with frames still unacked: restart the
	 * retransmission timer.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5568 
/* Chain new_frag onto skb's fragment list and update the parent skb's
 * accounting.  *last_frag tracks the current tail of the chain.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	/* NOTE(review): on the first fragment *last_frag is the parent skb
	 * itself (see L2CAP_SAR_START handling), so this also writes the
	 * parent's ->next — assumes the parent is not on any queue here.
	 */
	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	/* Parent accounting covers the whole chain */
	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5587 
/* Reassemble SDU fragments according to control->sar and deliver
 * complete SDUs via chan->ops->recv().
 *
 * Ownership: on success the skb is consumed (delivered, or stored in
 * chan->sdu for later fragments).  On error both the skb (if still
 * ours) and any partially assembled SDU are freed and the reassembly
 * state is reset.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A reassembly must not already be in progress */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* The first fragment starts with the total SDU length */
		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment may not already hold the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb is now owned by chan->sdu; don't free it below */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation may not complete (or overflow) the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The end fragment must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop the fragment (if still ours) and any partial SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5672 
/* Re-segment queued outgoing frames after the connection MTU changed
 * (e.g. following a channel move).  Not implemented yet; report success
 * so the state machine can proceed.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	return 0;
}
5678 
l2cap_chan_busy(struct l2cap_chan * chan,int busy)5679 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5680 {
5681 	u8 event;
5682 
5683 	if (chan->mode != L2CAP_MODE_ERTM)
5684 		return;
5685 
5686 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5687 	l2cap_tx(chan, NULL, NULL, event);
5688 }
5689 
/* Deliver buffered out-of-order I-frames that have become sequential
 * after SREJ recovery.  Returns the first reassembly error, or 0.  When
 * the SREJ queue drains completely the channel returns to the normal
 * RECV state and an acknowledgment is sent.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5723 
/* Handle a received SREJ S-frame: retransmit the single requested
 * I-frame, tracking P/F-bit bookkeeping per the ERTM state tables.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* The peer may not selectively reject a frame we never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx of 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* P-bit set: the retransmitted frame must carry the F-bit */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit when this F-bit answers the
			 * SREJ we have already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5781 
/* Handle a received REJ S-frame: the peer asks for retransmission of
 * everything from reqseq onward.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* The peer may not reject a frame we never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx of 0 means unlimited retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if this F-bit doesn't answer a REJ we
		 * already acted on.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5818 
/* Classify a received I-frame's TxSeq relative to the receive window.
 *
 * Returns one of the L2CAP_TXSEQ_* classifications that drive the ERTM
 * receive state machines: expected, duplicate, unexpected (a gap that
 * needs SREJ recovery), the SREJ-specific variants while in SREJ_SENT
 * state, or invalid/invalid-ignore for frames outside the window.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		/* Even the expected sequence number must lie inside the
		 * transmit window measured from the last acked frame.
		 */
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
5904 
/* ERTM receive state machine: RECV (normal) state.
 *
 * Handles incoming I-frames and S-frames while no SREJ recovery is in
 * progress.  Unless the skb is consumed (delivered to the channel or
 * queued for SREJ reordering), it is freed before returning.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	struct l2cap_ctrl local_control;
	int err = 0;
	bool skb_in_use = false;	/* set once skb ownership moves on */

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu may free skb, hence invalidate
			 * control, so make a copy in advance to use it after
			 * l2cap_reassemble_sdu returns and to avoid the race
			 * condition, for example:
			 *
			 * The current thread calls:
			 *   l2cap_reassemble_sdu
			 *     chan->ops->recv == l2cap_sock_recv_cb
			 *       __sock_queue_rcv_skb
			 * Another thread calls:
			 *   bt_sock_recvmsg
			 *     skb_recv_datagram
			 *     skb_free_datagram
			 * Then the current thread tries to access control, but
			 * it was freed by skb_free_datagram.
			 */
			local_control = *control;
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (local_control.final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					local_control.final = 0;
					l2cap_retransmit_all(chan, &local_control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; only the ack info is useful */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Leaving remote-busy with unacked frames: restart
			 * the retransmission timer.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		/* NOTE(review): the NULL check on control looks redundant —
		 * it has already been dereferenced above; confirm callers.
		 */
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	/* Frame was neither delivered nor queued: drop it */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6056 
/* ERTM receive state machine: SREJ_SENT state.
 *
 * One or more SREJs are outstanding; out-of-order frames are buffered
 * in srej_q until the requested retransmissions arrive.  Unless the
 * skb is consumed (queued), it is freed before returning.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;	/* set once skb ownership moves on */

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The retransmission we asked for first arrived;
			 * it may unblock a run of queued frames.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			/* Leaving remote-busy with unacked frames: restart
			 * the retransmission timer.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll with the tail of the SREJ list */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* Frame was not queued: drop it */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6199 
l2cap_finish_move(struct l2cap_chan * chan)6200 static int l2cap_finish_move(struct l2cap_chan *chan)
6201 {
6202 	BT_DBG("chan %p", chan);
6203 
6204 	chan->rx_state = L2CAP_RX_STATE_RECV;
6205 	chan->conn->mtu = chan->conn->hcon->mtu;
6206 
6207 	return l2cap_resegment(chan);
6208 }
6209 
/* ERTM receive state machine: WAIT_P state (during a channel move).
 *
 * Only an S-frame with the P-bit set is acceptable here; anything else
 * is a protocol error.  On the poll, resynchronize the transmit side to
 * the peer's ReqSeq, finish the move, and process the event via the
 * normal RECV handler.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames are not valid until the move handshake completes */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6247 
/* RX handler for the WAIT_F state (after a channel move): wait for the
 * final (F=1) frame answering our poll, then rewind the transmit state
 * to the peer's reqseq, adopt the new MTU, re-segment, and process the
 * frame through the normal RECV handler.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	/* Only an F=1 frame may take us out of WAIT_F */
	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;
	chan->conn->mtu = chan->conn->hcon->mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6281 
__valid_reqseq(struct l2cap_chan * chan,u16 reqseq)6282 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6283 {
6284 	/* Make sure reqseq is for a packet that has been sent but not acked */
6285 	u16 unacked;
6286 
6287 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6288 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6289 }
6290 
/* Top-level ERTM RX state machine dispatcher.  Validates the frame's
 * reqseq against the unacked window and hands the event to the handler
 * for the current rx_state.  An invalid reqseq is a protocol violation
 * and tears the channel down.  Returns 0 or a negative errno.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (__valid_reqseq(chan, control->reqseq)) {
		switch (chan->rx_state) {
		case L2CAP_RX_STATE_RECV:
			err = l2cap_rx_state_recv(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_SREJ_SENT:
			err = l2cap_rx_state_srej_sent(chan, control, skb,
						       event);
			break;
		case L2CAP_RX_STATE_WAIT_P:
			err = l2cap_rx_state_wait_p(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_WAIT_F:
			err = l2cap_rx_state_wait_f(chan, control, skb, event);
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return err;
}
6327 
/* Receive path for streaming mode: there is no retransmission, so a
 * frame with an unexpected txseq simply discards any partially
 * reassembled SDU and is itself dropped.  Always returns 0.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
	 * the txseq field in advance to use it after l2cap_reassemble_sdu
	 * returns and to avoid the race condition, for example:
	 *
	 * The current thread calls:
	 *   l2cap_reassemble_sdu
	 *     chan->ops->recv == l2cap_sock_recv_cb
	 *       __sock_queue_rcv_skb
	 * Another thread calls:
	 *   bt_sock_recvmsg
	 *     skb_recv_datagram
	 *     skb_free_datagram
	 * Then the current thread tries to access control, but it was freed by
	 * skb_free_datagram.
	 */
	u16 txseq = control->txseq;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Unexpected txseq: abandon any partial SDU and drop frame */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize to the frame just seen */
	chan->last_acked_seq = txseq;
	chan->expected_tx_seq = __next_seq(chan, txseq);

	return 0;
}
6379 
/* Entry point for ERTM/streaming data frames on a channel.  Unpacks the
 * control field, verifies FCS and payload length, then dispatches
 * I-frames and S-frames to the appropriate state machine.  Consumes the
 * skb on every path and always returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Payload length excludes the SDU length field of a start fragment */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	/* ... and the trailing FCS, when in use */
	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	/* A PDU larger than the negotiated MPS is a protocol violation */
	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the 2-bit S-frame function field to an RX event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6472 
/* Top up the peer's LE flow-control credits.  Computes the credit level
 * the channel should advertise and, if it exceeds what the peer
 * currently holds, sends the difference in an LE Flow Control Credit
 * packet.
 */
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits = l2cap_le_rx_credits(chan);

	/* Peer already holds at least the target amount: nothing to do */
	if (chan->rx_credits >= return_credits)
		return;

	return_credits -= chan->rx_credits;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	chan->rx_credits += return_credits;

	pkt.cid     = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
6495 
/* Report how much receive buffer space (in bytes) the channel owner
 * currently has; on a connected channel this may trigger sending
 * additional LE credits to the peer.  Callers elsewhere use -1 to mean
 * "unbounded" (see the rx_avail != -1 check in l2cap_ecred_recv()).
 */
void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail)
{
	if (chan->rx_avail == rx_avail)
		return;

	BT_DBG("chan %p has %zd bytes avail for rx", chan, rx_avail);

	chan->rx_avail = rx_avail;

	if (chan->state == BT_CONNECTED)
		l2cap_chan_le_send_credits(chan);
}
6508 
/* Deliver a fully reassembled LE/ECRED SDU to the channel owner and
 * replenish the peer's credits.  A delivery failure with a bounded rx
 * buffer (rx_avail != -1) is fatal for the channel.  Returns the recv
 * callback's result.
 */
static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);

	/* Wait recv to confirm reception before updating the credits */
	err = chan->ops->recv(chan, skb);

	if (err < 0 && chan->rx_avail != -1) {
		BT_ERR("Queueing received LE L2CAP data failed");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return err;
	}

	/* Update credits whenever an SDU is received */
	l2cap_chan_le_send_credits(chan);

	return err;
}
6529 
/* Receive one LE/ECRED PDU: consume a credit, then either start a new
 * SDU (first PDU carries the SDU length) or append a continuation
 * fragment, delivering the SDU upward once complete.  The skb (and any
 * partial SDU) is always freed internally on error, so this function
 * deliberately returns 0 in that case to prevent a caller double-free;
 * a non-zero return (-ENOBUFS) means the skb was NOT consumed.
 */
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	/* Peer sent data without holding credits: protocol violation */
	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("chan %p: rx_credits %u -> %u",
	       chan, chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happen
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		/* First PDU of an SDU: starts with a 2-byte SDU length */
		u16 sdu_len;

		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Unfragmented SDU: deliver immediately */
		if (skb->len == sdu_len)
			return l2cap_ecred_recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* Fragment is now owned by chan->sdu; NULL skb so the error path
	 * below cannot free it twice.
	 */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6634 
/* Route an incoming PDU on a connection-oriented or fixed CID to its
 * channel's mode-specific receive handler.  Consumes the skb on every
 * path.  l2cap_get_chan_by_scid() returns the channel locked and with a
 * reference held; both are released before returning.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() always consumes the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
6700 
/* Deliver a connectionless (CID 0x0002) PDU to a matching raw channel
 * bound to the given PSM.  Only valid on BR/EDR links.  Consumes the
 * skb on every path.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	/* l2cap_global_chan_by_psm() returns the channel with a ref held */
	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	l2cap_chan_lock(chan);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* recv callback took ownership of the skb on success */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6741 
/* Demultiplex one complete L2CAP frame to the signaling, connectionless
 * or data channel handler.  Frames arriving before the HCI link is
 * fully connected are queued on conn->pending_rx and replayed later by
 * process_pending_rx().  Consumes the skb on every path.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	/* Header is read before skb_pull() below, so lh stays valid */
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload length */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6796 
/* Work item: replay frames that were queued on conn->pending_rx while
 * the underlying HCI link was still being established.
 */
static void process_pending_rx(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       pending_rx_work);
	struct sk_buff *skb;

	BT_DBG("");

	mutex_lock(&conn->lock);

	while ((skb = skb_dequeue(&conn->pending_rx)))
		l2cap_recv_frame(conn, skb);

	mutex_unlock(&conn->lock);
}
6812 
/* Get or create the L2CAP connection context for an HCI connection.
 * Returns the already-attached conn if one exists; otherwise allocates
 * one, binds it to a fresh hci_chan and initializes its locks, channel
 * list and work items.  Returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	conn->mtu = hcon->mtu;
	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* Advertise the SMP fixed channel over BR/EDR only when secure
	 * connections are possible (or forced for testing).
	 */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_DELAYED_WORK(&conn->id_addr_timer, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
6864 
is_valid_psm(u16 psm,u8 dst_type)6865 static bool is_valid_psm(u16 psm, u8 dst_type)
6866 {
6867 	if (!psm)
6868 		return false;
6869 
6870 	if (bdaddr_type_is_le(dst_type))
6871 		return (psm <= 0x00ff);
6872 
6873 	/* PSM must be odd and lsb of upper byte must be 0 */
6874 	return ((psm & 0x0101) == 0x0001);
6875 }
6876 
/* Context passed to l2cap_chan_by_pid() via l2cap_chan_list() */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* channel being connected (excluded from the count) */
	struct pid *pid;		/* owner PID to match against */
	int count;			/* number of matching deferred channels */
};
6882 
/* l2cap_chan_list() iterator: count deferred EXT_FLOWCTL channels,
 * other than d->chan itself, that the same PID is currently connecting
 * (BT_CONNECT, no pending ident) to the same PSM.  Used by
 * l2cap_chan_connect() to enforce L2CAP_ECRED_CONN_SCID_MAX.
 */
static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
{
	struct l2cap_chan_data *d = data;
	struct pid *pid;

	if (chan == d->chan)
		return;

	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	pid = chan->ops->get_peer_pid(chan);

	/* Only count deferred channels with the same PID/PSM */
	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
		return;

	d->count++;
}
6903 
/* Initiate an outgoing L2CAP channel to @dst: validates PSM/CID and
 * channel mode/state, creates (or reuses) the underlying ACL or LE
 * link, attaches the channel to the connection and kicks off the
 * connect procedure.  @timeout is the link establishment timeout.
 * Returns 0 on success (including "already connecting"), -EISCONN if
 * already connected, or another negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type, u16 timeout)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* Need either a valid PSM or a fixed CID (raw channels exempt) */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Reject modes that are compiled out or administratively disabled */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising we connect directly; otherwise go
		 * through the pending-connection scan list.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type, false,
					      chan->sec_level, timeout,
					      HCI_ROLE_SLAVE, 0, 0);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level, timeout,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN, timeout);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check if there isn't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->lock);
	l2cap_chan_lock(chan);

	/* A fixed destination CID must not already be in use */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7088 
l2cap_ecred_reconfigure(struct l2cap_chan * chan)7089 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
7090 {
7091 	struct l2cap_conn *conn = chan->conn;
7092 	DEFINE_RAW_FLEX(struct l2cap_ecred_reconf_req, pdu, scid, 1);
7093 
7094 	pdu->mtu = cpu_to_le16(chan->imtu);
7095 	pdu->mps = cpu_to_le16(chan->mps);
7096 	pdu->scid[0] = cpu_to_le16(chan->scid);
7097 
7098 	chan->ident = l2cap_get_ident(conn);
7099 
7100 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
7101 		       sizeof(pdu), &pdu);
7102 }
7103 
/* Change the incoming MTU of an ECRED channel and advertise it to the
 * peer with a reconfigure request.  Only growing the MTU is allowed.
 * Returns 0 on success or -EINVAL if @mtu is smaller than the current
 * imtu.
 */
int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
{
	if (chan->imtu > mtu)
		return -EINVAL;

	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);

	chan->imtu = mtu;

	l2cap_ecred_reconfigure(chan);

	return 0;
}
7117 
7118 /* ---- L2CAP interface with lower layer (HCI) ---- */
7119 
/* HCI callback for an incoming BR/EDR connection request: scan the
 * listening channels and report whether to accept (HCI_LM_ACCEPT) and
 * whether to request the master role.  An exact source-address match
 * takes precedence over BDADDR_ANY listeners.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&c->src, &hdev->bdaddr)) {
			/* Bound to this adapter's own address */
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&c->src, BDADDR_ANY)) {
			/* Wildcard listener */
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}
7148 
7149 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7150  * from an existing channel in the list or from the beginning of the
7151  * global list (by passing NULL as first parameter).
7152  */
/* Return the next listening fixed channel matching @hcon's source
 * address type, starting after @c (or from the head of chan_list when
 * @c is NULL).  The returned channel carries a reference taken under
 * chan_list_lock; NULL when no further match exists.
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Must be bound to this adapter's address or wildcard */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* May return NULL if the channel is being torn down */
		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7184 
/* HCI callback: a link to the remote completed (or failed).  On success
 * attach (or create) the L2CAP connection, spawn channels for all
 * listening fixed channels, and run the connection-ready logic.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Advance before dropping the ref held on pchan */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7245 
/* HCI callback: report the disconnect reason to use for this link,
 * falling back to "remote user terminated" when no L2CAP context is
 * attached.
 */
int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if (!conn)
		return HCI_ERROR_REMOTE_USER_TERM;
	return conn->disc_reason;
}
7256 
/* HCI callback: the link went down — tear down the L2CAP connection
 * (ACL and LE links only).
 */
static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
7266 
/* React to an encryption change on a connection-oriented channel: when
 * encryption drops, give MEDIUM-security channels a grace timer and
 * close HIGH/FIPS channels immediately; when it comes up, cancel the
 * grace timer.
 */
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return;

	if (encrypt == 0x00) {
		if (chan->sec_level == BT_SECURITY_MEDIUM) {
			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
		} else if (chan->sec_level == BT_SECURITY_HIGH ||
			   chan->sec_level == BT_SECURITY_FIPS)
			l2cap_chan_close(chan, ECONNREFUSED);
	} else {
		if (chan->sec_level == BT_SECURITY_MEDIUM)
			__clear_chan_timer(chan);
	}
}
7283 
/* HCI callback: authentication/encryption completed on the link with
 * the given status.  Walk every channel on the connection and advance
 * its state machine: resume connected channels, start (or abort)
 * pending outgoing connects, and answer held incoming connect requests.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* Security procedure succeeded: record the achieved level */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing connect waiting on security: proceed or
			 * schedule disconnect on failure/short key.
			 */
			if (!status && l2cap_check_enc_key_size(hcon))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			/* Incoming connect held for security: send response */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Accepted: immediately start configuration */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
7370 
7371 /* Append fragment into frame respecting the maximum len of rx_skb */
/* Append fragment into frame respecting the maximum len of rx_skb */
static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
			   u16 len)
{
	u16 count;

	/* First fragment of a frame: allocate an skb large enough for the
	 * complete frame (header included) and record the expected length.
	 */
	if (!conn->rx_skb) {
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			return -ENOMEM;
		conn->rx_len = len;
	}

	/* Consume no more than what this fragment actually carries */
	count = min_t(u16, len, skb->len);

	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, count), count);
	skb_pull(skb, count);
	conn->rx_len -= count;

	return count;
}
7392 
/* Complete the 16-bit L2CAP length field from the incoming fragment and,
 * if the current rx_skb is too small to hold the full frame, reallocate it
 * with the exact expected size.
 *
 * Returns a negative errno on allocation failure, otherwise the number of
 * bytes consumed from @skb.
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to received all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in order to
	 * fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length.  The old
	 * rx_skb now acts as the source fragment (it holds the length
	 * bytes already read) and is freed once copied from.
	 */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}
7427 
/* Drop any partially reassembled frame and reset the recombination state */
static void l2cap_recv_reset(struct l2cap_conn *conn)
{
	kfree_skb(conn->rx_skb);
	conn->rx_skb = NULL;
	conn->rx_len = 0;
}
7434 
l2cap_conn_hold_unless_zero(struct l2cap_conn * c)7435 struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *c)
7436 {
7437 	if (!c)
7438 		return NULL;
7439 
7440 	BT_DBG("conn %p orig refcnt %u", c, kref_read(&c->ref));
7441 
7442 	if (!kref_get_unless_zero(&c->ref))
7443 		return NULL;
7444 
7445 	return c;
7446 }
7447 
/* Entry point for incoming ACL data.  Reassembles L2CAP frames from start
 * and continuation fragments in conn->rx_skb and hands each complete frame
 * to l2cap_recv_frame(), which takes ownership of the passed skb.
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn;
	int len;

	/* Lock hdev to access l2cap_data to avoid race with l2cap_conn_del */
	hci_dev_lock(hcon->hdev);

	conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	/* Hold the conn so it cannot be freed while we process this skb */
	conn = l2cap_conn_hold_unless_zero(conn);

	hci_dev_unlock(hcon->hdev);

	if (!conn) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);

	mutex_lock(&conn->lock);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start frame while reassembly is in progress means
		 * the previous frame was truncated: discard it.
		 */
		if (conn->rx_skb) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment may not contain the L2CAP length so just
		 * copy the initial byte when that happens and use conn->mtu as
		 * expected length.
		 */
		if (skb->len < L2CAP_LEN_SIZE) {
			l2cap_recv_frag(conn, skb, conn->mtu);
			break;
		}

		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received; l2cap_recv_frame owns the
			 * skb now, so skip the kfree_skb at drop.
			 */
			l2cap_recv_frame(conn, skb);
			goto unlock;
		}

		BT_DBG("Start: total len %d, frag len %u", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %u, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		if (l2cap_recv_frag(conn, skb, len) < 0)
			goto drop;

		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);

		if (!conn->rx_skb) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Complete the L2CAP length if it has not been read */
		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
			if (l2cap_recv_len(conn, skb) < 0) {
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}

			/* Header still could not be read just continue */
			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
				break;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %u, expected %u)",
			       skb->len, conn->rx_len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		l2cap_recv_frag(conn, skb, skb->len);

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

	/* Fragment contents (if any) were copied into rx_skb above, so the
	 * incoming skb is always freed here — on errors and on fall-through.
	 */
drop:
	kfree_skb(skb);
unlock:
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
}
7566 
/* Callbacks registered with the HCI core so L2CAP is notified of
 * connection, disconnection and security (encryption) events.
 */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
7573 
/* Dump one line per registered L2CAP channel to the "l2cap" debugfs file */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct l2cap_chan *c;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		/* src addr/type, dst addr/type, state, psm, scid, dcid,
		 * imtu, omtu, sec_level, mode
		 */
		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
			   &c->src, c->src_type, &c->dst, c->dst_type,
			   c->state, __le16_to_cpu(c->psm),
			   c->scid, c->dcid, c->imtu, c->omtu,
			   c->sec_level, c->mode);
	}

	read_unlock(&chan_list_lock);

	return 0;
}
7592 
/* Generates l2cap_debugfs_fops around l2cap_debugfs_show() */
DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

/* Handle of the debugfs file created in l2cap_init() */
static struct dentry *l2cap_debugfs;
7596 
l2cap_init(void)7597 int __init l2cap_init(void)
7598 {
7599 	int err;
7600 
7601 	err = l2cap_init_sockets();
7602 	if (err < 0)
7603 		return err;
7604 
7605 	hci_register_cb(&l2cap_cb);
7606 
7607 	if (IS_ERR_OR_NULL(bt_debugfs))
7608 		return 0;
7609 
7610 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7611 					    NULL, &l2cap_debugfs_fops);
7612 
7613 	return 0;
7614 }
7615 
/* Module teardown: unwind l2cap_init() in reverse order */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
7622 
/* Module parameters (mode 0644: adjustable at runtime) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
7628