1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <[email protected]>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <[email protected]>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40
41 #include "smp.h"
42
43 #define LE_FLOWCTL_MAX_CREDITS 65535
44
45 bool disable_ertm;
46 bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED);
47
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49
50 static LIST_HEAD(chan_list);
51 static DEFINE_RWLOCK(chan_list_lock);
52
53 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
54 u8 code, u8 ident, u16 dlen, void *data);
55 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 void *data);
57 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
58 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59
60 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
61 struct sk_buff_head *skbs, u8 event);
62 static void l2cap_retrans_timeout(struct work_struct *work);
63 static void l2cap_monitor_timeout(struct work_struct *work);
64 static void l2cap_ack_timeout(struct work_struct *work);
65
/* Map an HCI link type plus HCI-level address type onto the
 * corresponding socket-level BDADDR_* address type.
 */
static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
{
	if (link_type != LE_LINK)
		return BDADDR_BREDR;

	return bdaddr_type == ADDR_LE_DEV_PUBLIC ? BDADDR_LE_PUBLIC :
						   BDADDR_LE_RANDOM;
}
77
/* Socket-level address type of the local (source) side of @hcon. */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}
82
/* Socket-level address type of the remote (destination) side of @hcon. */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}
87
88 /* ---- L2CAP channels ---- */
89
/* Find a channel on @conn by destination CID.
 * Returns the channel without taking a reference; callers that need one
 * use l2cap_get_chan_by_dcid() instead.
 */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->dcid == cid)
			return c;
	}
	return NULL;
}
101
/* Find a channel on @conn by source CID.
 * Returns the channel without taking a reference; callers that need one
 * use l2cap_get_chan_by_scid() instead.
 */
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->scid == cid)
			return c;
	}
	return NULL;
}
113
114 /* Find channel with given SCID.
115 * Returns a reference locked channel.
116 */
/* Find channel with given SCID.
 * Returns a reference locked channel.
 * May return NULL if no channel matches or if the matching channel's
 * refcount already dropped to zero (it is being destroyed).
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}

	return c;
}
132
133 /* Find channel with given DCID.
134 * Returns a reference locked channel.
135 */
/* Find channel with given DCID.
 * Returns a reference locked channel.
 * May return NULL if no channel matches or if the matching channel's
 * refcount already dropped to zero (it is being destroyed).
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}

	return c;
}
151
/* Find a channel on @conn by the signalling command identifier it is
 * currently using. No reference is taken on the result.
 */
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->ident == ident)
			return c;
	}
	return NULL;
}
163
/* Find a channel in the global list bound to @psm and source address
 * @src. BR/EDR and LE channels live in disjoint PSM spaces, so a
 * BR/EDR search only matches BR/EDR channels and vice versa.
 * Caller must hold chan_list_lock; no reference is taken on the result.
 */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
						      u8 src_type)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
			continue;

		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
			continue;

		if (c->sport == psm && !bacmp(&c->src, src))
			return c;
	}
	return NULL;
}
181
/* Bind @chan to @psm on source address @src.
 * If @psm is zero, dynamically allocate a free PSM: odd values starting
 * at L2CAP_PSM_DYN_START for BR/EDR (PSMs must be odd, hence incr 2),
 * or any value in the LE dynamic range for LE.
 * Returns 0 on success, -EADDRINUSE if the PSM is taken, or -EINVAL if
 * the dynamic range is exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
226
/* Bind @chan directly to a fixed source CID (e.g. ATT or SMP).
 * Always succeeds and returns 0.
 */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
241
/* Allocate the lowest free dynamic CID on @conn, honouring the smaller
 * LE dynamic range for LE links. Returns 0 if the range is exhausted
 * (0 is not a valid dynamic CID).
 */
static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
{
	u16 cid, dyn_end;

	if (conn->hcon->type == LE_LINK)
		dyn_end = L2CAP_CID_LE_DYN_END;
	else
		dyn_end = L2CAP_CID_DYN_END;

	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
		if (!__l2cap_get_chan_by_scid(conn, cid))
			return cid;
	}

	return 0;
}
258
/* Move @chan to @state and notify its owner via the state_change op
 * (with no error).
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
267
/* Move @chan to @state and report @err to its owner in one call. */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
274
/* Report @err to the channel owner without changing the state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
279
/* Arm the ERTM retransmission timer, unless the monitor timer is
 * already pending (the monitor phase supersedes retransmission) or no
 * retransmission timeout has been negotiated.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
288
/* Switch from the retransmission phase to the monitor phase: stop the
 * retransmission timer and arm the monitor timer if one is configured.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
297
/* Linear search of @head for the skb carrying ERTM tx sequence number
 * @seq; returns NULL if not queued.
 */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->l2cap.txseq == seq)
			return skb;
	}

	return NULL;
}
310
311 /* ---- L2CAP sequence number lists ---- */
312
313 /* For ERTM, ordered lists of sequence numbers must be tracked for
314 * SREJ requests that are received and for frames that are to be
315 * retransmitted. These seq_list functions implement a singly-linked
316 * list in an array, where membership in the list can also be checked
317 * in constant time. Items can also be added to the tail of the list
318 * and removed from the head in constant time, without further memory
319 * allocs or frees.
320 */
321
/* Allocate and clear the backing array for a sequence list able to
 * hold at least @size entries. Returns 0 or -ENOMEM.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* Power-of-two size lets "seq & mask" index the array */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
344
/* Release the array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
349
/* True if @seq is currently a member of the list (its slot holds a
 * next-pointer or the TAIL marker rather than CLEAR).
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
356
/* Remove and return the sequence number at the head of the list.
 * Caller must ensure the list is non-empty (head != CLEAR).
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the popped entry's successor and clear its slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	/* Popped the last entry: reset the list to empty */
	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
372
/* Empty the list, clearing every slot. O(array size), but a no-op for
 * an already-empty list.
 */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
386
/* Append @seq to the tail of the list; duplicates are silently
 * ignored (the slot is already in use).
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
404
/* Delayed-work handler for chan->chan_timer: tears down a channel
 * whose setup/teardown timer expired, reporting an errno that reflects
 * the state the channel was stuck in.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	if (!conn)
		return;

	mutex_lock(&conn->lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	/* Drop the reference taken when the timer was armed */
	l2cap_chan_put(chan);

	mutex_unlock(&conn->lock);
}
440
/* Allocate and initialise a new channel, add it to the global channel
 * list and return it with an initial reference (kref = 1).
 * Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	/* Available receive buffer space is initially unknown */
	chan->rx_avail = -1;

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
480
/* kref release callback: unlink the channel from the global list and
 * free it. Called when the last reference is dropped via
 * l2cap_chan_put().
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
493
/* Take an additional reference on @c. Caller must already hold one. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
500
/* Take a reference on @c only if its refcount has not already dropped
 * to zero. Returns @c on success or NULL if the channel is being
 * destroyed, so a lookup result can be safely pinned.
 */
struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	if (!kref_get_unless_zero(&c->kref))
		return NULL;

	return c;
}
510
/* Drop a reference on @c; frees the channel via l2cap_chan_destroy()
 * when the last one goes away.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
518
/* Reset @chan's negotiable parameters (FCS, ERTM windows/timeouts,
 * security level, flush timeout) to the spec defaults and clear any
 * previous configuration state.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

	chan->conf_state = 0;
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
539
/* Compute how many LE flow-control credits to grant the remote,
 * based on the advertised receive buffer space (chan->rx_avail) minus
 * what a partially reassembled SDU already occupies.
 */
static __u16 l2cap_le_rx_credits(struct l2cap_chan *chan)
{
	size_t sdu_len = chan->sdu ? chan->sdu->len : 0;

	if (chan->mps == 0)
		return 0;

	/* If we don't know the available space in the receiver buffer, give
	 * enough credits for a full packet.
	 */
	if (chan->rx_avail == -1)
		return (chan->imtu / chan->mps) + 1;

	/* If we know how much space is available in the receive buffer, give
	 * out as many credits as would fill the buffer.
	 */
	if (chan->rx_avail <= sdu_len)
		return 0;

	return DIV_ROUND_UP(chan->rx_avail - sdu_len, chan->mps);
}
561
/* Initialise LE credit-based flow-control state for @chan: reset SDU
 * reassembly, set the remote's initial @tx_credits, derive our MPS and
 * grant initial rx credits.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	chan->rx_credits = l2cap_le_rx_credits(chan);

	skb_queue_head_init(&chan->tx_q);
}
574
/* Initialise enhanced-credit (ECRED) flow-control state: same as LE
 * flow control, but with the spec-mandated 64-octet minimum MPS
 * enforced (which may change the credit calculation).
 */
static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
{
	l2cap_le_flowctl_init(chan, tx_credits);

	/* L2CAP implementations shall support a minimum MPS of 64 octets */
	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
		chan->mps = L2CAP_ECRED_MIN_MPS;
		chan->rx_credits = l2cap_le_rx_credits(chan);
	}
}
585
/* Attach @chan to @conn: assign CIDs/MTU according to the channel
 * type, take a channel reference, optionally pin the underlying HCI
 * connection, and append the channel to the connection's list.
 * Caller must hold conn->lock (see l2cap_chan_add()).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	/* Append to the list since the order matters for ECRED */
	list_add_tail(&chan->list, &conn->chan_l);
}
638
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->lock);
}
645
/* Detach @chan from its connection and tear down its transmit state.
 * Drops the reference and HCI connection pin taken in
 * __l2cap_chan_add(), then purges mode-specific queues unless the
 * channel never finished configuration.
 * @err is propagated to the owner via the teardown op.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);
	}

	/* Nothing mode-specific was set up yet */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
703
/* Invoke @func on every channel of @conn whose signalling ident
 * matches @id. Uses the _safe iterator so @func may remove the
 * channel from the list.
 */
static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
				 l2cap_chan_func_t func, void *data)
{
	struct l2cap_chan *chan, *l;

	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		if (chan->ident == id)
			func(chan, data);
	}
}
714
/* Invoke @func on every channel of @conn. Caller must hold
 * conn->lock (see l2cap_chan_list()).
 */
static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
			      void *data)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		func(chan, data);
	}
}
724
/* Locked wrapper around __l2cap_chan_list(); tolerates a NULL @conn. */
void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
		     void *data)
{
	if (!conn)
		return;

	mutex_lock(&conn->lock);
	__l2cap_chan_list(conn, func, data);
	mutex_unlock(&conn->lock);
}

EXPORT_SYMBOL_GPL(l2cap_chan_list);
737
/* Delayed-work handler (conn->id_addr_timer): propagate the HCI
 * connection's (possibly resolved) destination address and type to
 * every channel on the connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_timer.work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
756
/* Reject a pending LE credit-based connection request: move the
 * channel to BT_DISCONN and answer with AUTHORIZATION (if setup was
 * deferred, i.e. userspace declined) or BAD_PSM.
 */
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_LE_AUTHORIZATION;
	else
		result = L2CAP_CR_LE_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid    = cpu_to_le16(chan->scid);
	rsp.mtu     = cpu_to_le16(chan->imtu);
	rsp.mps     = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}
779
/* Reject a pending ECRED connection: move to BT_DISCONN and let the
 * deferred ECRED response path send the (possibly multi-channel)
 * response.
 */
static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
{
	l2cap_state_change(chan, BT_DISCONN);

	__l2cap_ecred_conn_rsp_defer(chan);
}
786
/* Reject a pending BR/EDR connection request: move the channel to
 * BT_DISCONN and answer with SEC_BLOCK (if setup was deferred) or
 * BAD_PSM.
 */
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}
807
/* Close @chan according to its current state: tear down listeners,
 * send a disconnect request for established connection-oriented
 * channels, reject half-open incoming connections, or simply delete
 * the channel. @reason is the errno reported to the owner.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Graceful close: disconnect req with a timer as
			 * backstop in case the peer never responds.
			 */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming connection still awaiting our response */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					/* ECRED response is deferred; do not
					 * delete the channel yet.
					 */
					return;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
858
/* Translate the channel type, PSM and security level into the HCI
 * authentication requirement used for BR/EDR pairing. May lower the
 * security level to SDP for the SDP/3DSP PSMs.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		fallthrough;

	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
910
911 /* Service level security */
/* Service level security */
/* Enforce @chan's security level on its link: SMP for LE links,
 * HCI authentication/encryption for BR/EDR. Return value follows
 * smp_conn_security()/hci_conn_security().
 */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
925
/* Allocate the next signalling command identifier for @conn,
 * cycling through 1-128 under conn->ident_lock (0 is invalid).
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	mutex_lock(&conn->ident_lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	mutex_unlock(&conn->ident_lock);

	return id;
}
947
/* Hand @skb to the ACL layer, or free it if the underlying HCI
 * connection has already gone away (avoids sending on a dead link).
 */
static void l2cap_send_acl(struct l2cap_conn *conn, struct sk_buff *skb,
			   u8 flags)
{
	/* Check if the hcon still valid before attempting to send */
	if (hci_conn_valid(conn->hcon->hdev, conn->hcon))
		hci_send_acl(conn->hchan, skb, flags);
	else
		kfree_skb(skb);
}
957
/* Build and transmit a signalling command (@code/@ident with @len
 * bytes of @data) at maximum HCI priority. Silently drops the command
 * if the skb cannot be built.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	l2cap_send_acl(conn, skb, flags);
}
982
/* Transmit a data skb on @chan, selecting flushable vs non-flushable
 * ACL flags based on link type, controller capability and the
 * channel's FLAG_FLUSHABLE setting.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
1005
/* Decode a 16-bit enhanced control field into @control, zeroing the
 * fields that do not apply to the decoded frame type (S- vs I-frame).
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
1029
/* Decode a 32-bit extended control field into @control, zeroing the
 * fields that do not apply to the decoded frame type (S- vs I-frame).
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
1053
/* Decode the control field at the start of @skb's data into the skb's
 * control block, consuming 4 or 2 bytes depending on whether the
 * channel negotiated extended control fields.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
1067
__pack_extended_control(struct l2cap_ctrl * control)1068 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1069 {
1070 u32 packed;
1071
1072 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1073 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1074
1075 if (control->sframe) {
1076 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1077 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1078 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1079 } else {
1080 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1081 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1082 }
1083
1084 return packed;
1085 }
1086
__pack_enhanced_control(struct l2cap_ctrl * control)1087 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1088 {
1089 u16 packed;
1090
1091 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1092 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1093
1094 if (control->sframe) {
1095 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1096 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1097 packed |= L2CAP_CTRL_FRAME_TYPE;
1098 } else {
1099 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1100 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1101 }
1102
1103 return packed;
1104 }
1105
/* Write the packed control field (16 or 32 bit, per FLAG_EXT_CTRL)
 * into an skb directly after the basic L2CAP header.
 */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
1118
__ertm_hdr_size(struct l2cap_chan * chan)1119 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1120 {
1121 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1122 return L2CAP_EXT_HDR_SIZE;
1123 else
1124 return L2CAP_ENH_HDR_SIZE;
1125 }
1126
/* Allocate and build a supervisory (S-frame) PDU for an ERTM channel.
 * The already-packed control field is placed right after the basic
 * L2CAP header, followed by an FCS when the channel uses CRC16.
 *
 * Returns the new skb, or ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	/* S-frames carry no payload: length covers control (+ FCS) only */
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers header + control, i.e. the whole skb so far */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1159
/* Build and transmit an S-frame described by @control, updating the
 * channel's ERTM bookkeeping (F-bit, RNR-sent state, ack tracking)
 * as a side effect. Silently ignores non-S-frame controls.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A pending F-bit is sent on the next non-poll frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	/* Track whether our last supervisory state was RNR */
	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* RR/RNR/REJ acknowledge reqseq; SREJ does not */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1197
/* Send a Receiver-Ready or Receiver-Not-Ready S-frame, choosing RNR
 * when the local side is busy. @poll sets the P-bit.
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		control.super = L2CAP_SUPER_RNR;
	else
		control.super = L2CAP_SUPER_RR;

	/* Acknowledge everything received so far */
	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
}
1216
__l2cap_no_conn_pending(struct l2cap_chan * chan)1217 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1218 {
1219 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1220 return true;
1221
1222 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1223 }
1224
/* Send an L2CAP Connection Request for @chan's PSM/SCID and mark the
 * channel as having a connect pending. A fresh command ident is
 * stored in chan->ident so the response can be matched.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1239
/* Transition a channel to BT_CONNECTED and notify its owner via the
 * ops->ready() callback. Also clears configuration state and the
 * channel timer. No-op if the channel is already connected.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* No credits yet: sending must wait until the peer grants some */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);
		break;
	}

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1265
/* Send an LE credit-based Connection Request for @chan. Initializes
 * LE flow control state first and guards against sending the request
 * twice via FLAG_LE_CONN_REQ_SENT.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Default the incoming MTU to the link MTU if unset */
	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	l2cap_le_flowctl_init(chan, 0);

	memset(&req, 0, sizeof(req));
	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1291
/* Accumulator passed to l2cap_ecred_defer_connect() while building an
 * Enhanced Credit Based connection request that batches up to 5 SCIDs.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req_hdr req;
		__le16 scid[5];	/* up to 5 channels per ECRED request */
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel initiating the request */
	struct pid *pid;		/* peer PID the batch is restricted to */
	int count;			/* number of scid[] entries filled */
};
1301
/* Channel-list iterator callback: fold deferred channels that match
 * the initiating channel's PID/PSM into the pending ECRED connect
 * request being built in @data.
 */
static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_conn_data *conn = data;
	struct pid *pid;

	/* Skip the channel that started the request */
	if (chan == conn->chan)
		return;

	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	pid = chan->ops->get_peer_pid(chan);

	/* Only add deferred channels with the same PID/PSM */
	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	/* Set the same ident so we can match on the rsp */
	chan->ident = conn->chan->ident;

	/* Include all channels deferred */
	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);

	conn->count++;
}
1333
/* Send an Enhanced Credit Based (ECRED) Connection Request for @chan,
 * batching in any compatible deferred channels on the same connection
 * (see l2cap_ecred_defer_connect()). Deferred channels themselves do
 * not initiate; the request is sent at most once per channel.
 */
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_conn_data data;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	memset(&data, 0, sizeof(data));
	data.pdu.req.psm     = chan->psm;
	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.req.mps     = cpu_to_le16(chan->mps);
	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.scid[0]     = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	data.count = 1;
	data.chan = chan;
	data.pid = chan->ops->get_peer_pid(chan);

	/* Pull in any deferred channels with matching PID/PSM */
	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);

	/* PDU length depends on how many SCIDs were collected */
	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
		       &data.pdu);
}
1366
/* Start an LE channel once link security allows it: fixed channels
 * (no PSM) become ready immediately; connecting channels send an
 * ECRED or LE credit-based connect request depending on their mode.
 */
static void l2cap_le_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	/* Wait until the required security level is reached */
	if (!smp_conn_security(conn->hcon, chan->sec_level))
		return;

	if (!chan->psm) {
		l2cap_chan_ready(chan);
		return;
	}

	if (chan->state == BT_CONNECT) {
		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
			l2cap_ecred_connect(chan);
		else
			l2cap_le_connect(chan);
	}
}
1386
l2cap_start_connection(struct l2cap_chan * chan)1387 static void l2cap_start_connection(struct l2cap_chan *chan)
1388 {
1389 if (chan->conn->hcon->type == LE_LINK) {
1390 l2cap_le_start(chan);
1391 } else {
1392 l2cap_send_conn_req(chan);
1393 }
1394 }
1395
/* Send an Information Request for the peer's feature mask, once per
 * connection, and arm the info timer so the exchange cannot stall
 * channel setup forever.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1413
/* Return true when the link's encryption key size is acceptable for
 * establishing L2CAP connections, or when the link is not encrypted
 * at all (in which case no key size requirement applies).
 */
static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
{
	/* The minimum encryption key size needs to be enforced by the
	 * host stack before establishing any L2CAP connections. The
	 * specification in theory allows a minimum of 1, but to align
	 * BR/EDR and LE transports, a minimum of 7 is chosen.
	 *
	 * This check might also be called for unencrypted connections
	 * that have no key size requirements. Ensure that the link is
	 * actually encrypted before enforcing a key size.
	 */
	int min_key_size = hcon->hdev->min_enc_key_size;

	/* On FIPS security level, key size must be 16 bytes */
	if (hcon->sec_level == BT_SECURITY_FIPS)
		min_key_size = 16;

	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
		hcon->enc_key_size >= min_key_size);
}
1434
/* Drive channel establishment for @chan. On LE, hand off to the LE
 * start path. On BR/EDR, first make sure the feature-mask info
 * exchange has been started and completed, then check security and
 * encryption key size before actually connecting.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* Feature exchange not started yet: kick it off and wait;
	 * l2cap_conn_start() resumes this channel when it completes.
	 */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	if (l2cap_check_enc_key_size(conn->hcon))
		l2cap_start_connection(chan);
	else
		/* Weak key: let the channel time out and disconnect */
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
1461
/* Nonzero when @mode is supported by both the local feature mask and
 * the remote @feat_mask. ERTM/streaming bits are only offered locally
 * when ERTM has not been disabled via the module parameter.
 */
static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
{
	u32 local = l2cap_feat_mask;

	if (!disable_ertm)
		local |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;

	switch (mode) {
	case L2CAP_MODE_ERTM:
		return feat_mask & local & L2CAP_FEAT_ERTM;
	case L2CAP_MODE_STREAMING:
		return feat_mask & local & L2CAP_FEAT_STREAMING;
	default:
		return 0x00;
	}
}
1477
/* Send a Disconnection Request for @chan, stop any ERTM timers that
 * are still running, and move the channel to BT_DISCONN with @err as
 * the channel error.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* ERTM timers must not fire once we start tearing down */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1499
1500 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn after the feature-mask exchange has
 * finished and advance its state machine: mark connectionless channels
 * ready, (re)start outgoing connects, and answer incoming CONNECT2
 * channels with a Connection Response (and a first Configure Request
 * on success). Each channel is locked individually while handled.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close channels that insisted on a mode the peer
			 * does not support (CONF_STATE2_DEVICE means no
			 * fallback is allowed).
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connection waiting for our response */
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Userspace must authorize first */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only start configuration after a successful,
			 * not-yet-configured accept.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}
}
1576
/* Per-connection LE setup once the link is up: start security for
 * outgoing pairings and, as peripheral, request a connection parameter
 * update if the current interval is outside the configured range.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1609
/* Called when the underlying HCI link becomes ready: kick the feature
 * exchange on ACL links, advance every existing channel under the
 * connection lock, run LE-specific setup, and finally let queued RX
 * work proceed.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless channels only become ready once
			 * the feature exchange has finished.
			 */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Release any RX packets that were queued before we were ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1645
/* Notify sockets that we cannot guarantee reliability anymore */
/* Propagate @err to every channel on @conn that asked for forced
 * reliability, so their owners learn the link can no longer be
 * trusted to deliver data reliably.
 */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}
}
1658
/* Info-request timer expiry: give up waiting for the peer's feature
 * mask, mark the exchange done anyway, and resume channel setup.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	mutex_lock(&conn->lock);
	l2cap_conn_start(conn);
	mutex_unlock(&conn->lock);
}
1671
1672 /*
1673 * l2cap_user
1674 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1675 * callback is called during registration. The ->remove callback is called
1676 * during unregistration.
1677 * An l2cap_user object can either be explicitly unregistered or when the
1678 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1679 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1680 * External modules must own a reference to the l2cap_conn object if they intend
1681 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1682 * any time if they don't.
1683 */
1684
/* Register an external l2cap_user on @conn. Calls user->probe() and,
 * on success, links the user into conn->users. Fails with -EINVAL if
 * the user is already registered and -ENODEV if the connection has
 * already been torn down. Returns 0 on success.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
1721 EXPORT_SYMBOL(l2cap_register_user);
1722
/* Unregister an l2cap_user from @conn and invoke its remove()
 * callback. Safe to call for a user that was never registered (the
 * empty-list check makes it a no-op). Serialized via the hci_dev
 * lock, matching l2cap_register_user().
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	if (list_empty(&user->list))
		goto out_unlock;

	list_del_init(&user->list);
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
1738 EXPORT_SYMBOL(l2cap_unregister_user);
1739
/* Detach and remove() every registered l2cap_user on @conn. Used
 * during connection teardown; callers provide the necessary locking
 * (see l2cap_conn_del()).
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del_init(&user->list);
		user->remove(conn, user);
	}
}
1750
/* Tear down the L2CAP connection attached to @hcon: cancel pending
 * work, unregister users, close every channel with @err, detach the
 * HCI channel, and drop the connection reference. Safe to call when
 * no l2cap_data is attached.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	mutex_lock(&conn->lock);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	cancel_delayed_work_sync(&conn->id_addr_timer);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	/* Kill channels: hold each one so it survives l2cap_chan_del()
	 * until we are done with it.
	 */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	/* The info timer is only armed once a request was sent */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hci_chan_del(conn->hchan);
	conn->hchan = NULL;

	hcon->l2cap_data = NULL;
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
}
1804
/* kref release callback: drop the hci_conn reference held by the
 * connection and free it. Invoked by l2cap_conn_put() on last put.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1812
/* Take a reference on @conn and return it, for call-chaining. */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
1818 EXPORT_SYMBOL(l2cap_conn_get);
1819
/* Drop a reference on @conn; frees it via l2cap_conn_free() on last put. */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
1824 EXPORT_SYMBOL(l2cap_conn_put);
1825
1826 /* ---- Socket interface ---- */
1827
1828 /* Find socket with psm and source / destination bdaddr.
1829 * Returns closest match.
1830 */
/* Look up a global channel by PSM and source/destination addresses,
 * restricted to channels whose address type matches @link_type. An
 * exact src+dst match wins immediately; otherwise the closest
 * wildcard (BDADDR_ANY) match is returned. The returned channel has
 * had its refcount raised (caller must put it); NULL if none.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* Address type must match the link type */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				/* Skip channels already going away */
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	/* Take the reference while still under the lock */
	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1881
/* ERTM monitor timer expiry: feed a monitor-timeout event into the TX
 * state machine. The work item owns a channel reference which is
 * dropped on exit; a channel already detached from its connection is
 * ignored.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1902
/* ERTM retransmission timer expiry: feed a retransmit-timeout event
 * into the TX state machine. Mirrors l2cap_monitor_timeout() in
 * reference and detachment handling.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1922
/* Transmit a batch of I-frames in streaming mode: append @skbs to the
 * TX queue, then drain it, stamping each frame with the next tx
 * sequence number and an FCS when CRC16 is in use. Streaming mode has
 * no retransmission, so frames are sent and forgotten.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Streaming frames never acknowledge anything */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1958
/* Transmit as many pending I-frames as the ERTM TX window allows.
 * Each frame is stamped with reqseq/txseq (piggy-backing an ack),
 * FCS'd if needed, then a clone is sent while the original stays in
 * tx_q for possible retransmission.
 *
 * Returns the number of frames sent, 0 when the remote is busy, or
 * -ENOTCONN when the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* A pending F-bit rides on the first frame we send */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance the send head; NULL means queue exhausted */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2025
/* Retransmit every sequence number queued in chan->retrans_list.
 * Frames past the per-channel retry limit trigger a disconnect. Each
 * resend gets an updated reqseq/F-bit and recomputed FCS before a
 * copy (or clone, if the original is unshared) is sent.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the ack and F-bit for the retransmission */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
					   L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2103
/* Queue a single frame (identified by control->reqseq) for
 * retransmission and trigger the resend machinery.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2112
/* Rebuild the retransmission list to cover every unacked frame from
 * control->reqseq up to (but not including) the current send head,
 * then resend them. A set P-bit in @control requests an F-bit on our
 * next frame. No-op when the remote is busy or nothing is unacked.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame to retransmit */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything up to the send head */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2146
/* Acknowledge received I-frames.  Sends RNR when locally busy,
 * otherwise tries to piggyback the ack on pending I-frames and only
 * sends an explicit RR when the receive window is 3/4 full; smaller
 * backlogs just (re)arm the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Local busy: tell the peer to stop sending (RNR) */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;	/* threshold = win * 3 */
		threshold >>= 2;		/* threshold = win * 3 / 4 */

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Still un-acked frames below threshold: defer the ack */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2196
/* Copy @len bytes of user data from @msg into @skb: @count bytes go
 * into skb's linear area, the remainder into newly-allocated
 * continuation fragments (chained on frag_list, sized by conn->mtu).
 *
 * Returns the number of bytes copied or a negative error.  On error,
 * fragments already linked into @skb are freed by the caller's
 * kfree_skb(skb).
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link before copying so a failed copy still frees tmp
		 * along with skb.
		 */
		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
					 &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len -= count;

		/* Account the fragment in the head skb's totals */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2240
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by
 * the 2-byte PSM, then the user payload from @msg.
 *
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* Payload bytes that fit in the linear part alongside headers */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2272
/* Build a basic-mode (B-frame) PDU: L2CAP header plus the user payload
 * from @msg.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	/* Payload bytes that fit in the linear part alongside the header */
	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2302
/* Build an ERTM/streaming I-frame PDU.  The header comprises the L2CAP
 * header, an (enhanced or extended) control field, an optional SDU
 * length (only for the first segment, when @sdulen != 0), and reserved
 * room for the FCS.  The control field is zeroed here and filled in at
 * transmit time; the FCS is appended later as well, but lh->len already
 * accounts for it.
 *
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* len covers payload plus control/SDULEN/FCS overhead */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2356
/* Segment an SDU from @msg into ERTM/streaming I-frame PDUs queued on
 * @seg_queue.  Each PDU carries a SAR marker: UNSEGMENTED for a
 * single-PDU SDU, otherwise START (with SDU length) / CONTINUE / END.
 *
 * Returns 0 on success or a negative error (queue is purged on error).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the START PDU carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2422
/* Build an LE credit-based flow control PDU (K-frame): L2CAP header,
 * an optional SDU length (first segment only, when @sdulen != 0), then
 * the user payload from @msg.
 *
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* len includes the SDULEN field when present */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2465
/* Segment an SDU from @msg into LE flow-control PDUs queued on
 * @seg_queue.  Only the first PDU carries the 2-byte SDU length, so
 * subsequent PDUs can carry L2CAP_SDULEN_SIZE more payload bytes.
 *
 * Returns 0 on success or a negative error (queue is purged on error).
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	/* First segment: reserve room for the SDU length field */
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* Later segments have no SDULEN field */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2501
/* Transmit queued PDUs on an LE flow-control channel, consuming one
 * credit per PDU, until the queue is empty or credits run out.
 */
static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
{
	int sent = 0;

	BT_DBG("chan %p", chan);

	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
		chan->tx_credits--;
		sent++;
	}

	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
	       skb_queue_len(&chan->tx_q));
}
2517
/* Send user data from @msg on @chan, dispatching on channel type and
 * mode: connectionless, LE/extended flow control, basic, or
 * ERTM/streaming.
 *
 * Returns the number of bytes accepted (len) or a negative error.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have dropped while segmenting; -ENOTCONN
		 * takes precedence over any segmentation error.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: back-pressure the sender */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2621
/* Send SREJ S-frames for every missing sequence number between
 * expected_tx_seq and @txseq, skipping frames already buffered
 * out-of-order in srej_q, and track each request in srej_list.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2644
/* Re-send the SREJ for the most recently requested (tail) sequence
 * number, if the SREJ list is non-empty.
 */
static void l2cap_send_srej_tail(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
		return;

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;
	control.reqseq = chan->srej_list.tail;
	l2cap_send_sframe(chan, &control);
}
2660
/* Re-send SREJs for all outstanding requests except @txseq: pop each
 * entry, send the SREJ, and re-append it.  Capturing the initial head
 * bounds the walk to a single pass over the (rotating) list.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2686
/* Process an incoming acknowledgment (reqseq): free every tx_q frame
 * with a sequence number before @reqseq and stop the retransmission
 * timer once nothing remains unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or a duplicate ack: nothing to do */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2718
/* Abandon the SREJ_SENT receive state: drop all out-of-order buffered
 * frames and pending SREJ requests, and fall back to plain RECV.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2728
/* ERTM transmit state machine: XMIT state event handler.  In this
 * state the channel transmits freely; poll-type events move it to
 * WAIT_F (waiting for a final bit from the peer).
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Queue the new segments and send what the window allows */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Sends RNR while CONN_LOCAL_BUSY is set */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the peer to stop; poll it (RR with P=1)
			 * and wait for the final bit.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the peer for its state */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2800
/* ERTM transmit state machine: WAIT_F state event handler.  The
 * channel has sent a poll (P=1) and must not transmit new I-frames
 * until the peer answers with the final bit (F=1); the monitor timer
 * bounds the wait and retries up to max_tx times.
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Sends RNR while CONN_LOCAL_BUSY is set */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* Re-poll the peer now that we can receive again */
			struct l2cap_ctrl local_control;
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		fallthrough;

	case L2CAP_EV_RECV_FBIT:
		if (control && control->final) {
			/* Poll answered: back to normal transmission */
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			/* Fixed malformed format specifier (was "0x2.2%x") */
			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
		}
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_EV_MONITOR_TO:
		/* No final bit before the monitor timer fired: retry the
		 * poll, or give up and disconnect after max_tx attempts
		 * (max_tx == 0 means retry forever).
		 */
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
			l2cap_send_disconn_req(chan, ECONNABORTED);
		}
		break;
	default:
		break;
	}
}
2877
/* Dispatch a transmit-side event to the handler for the channel's
 * current ERTM TX state; events in unknown states are dropped.
 */
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
	       chan, control, skbs, event, chan->tx_state);

	if (chan->tx_state == L2CAP_TX_STATE_XMIT)
		l2cap_tx_state_xmit(chan, control, skbs, event);
	else if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
		l2cap_tx_state_wait_f(chan, control, skbs, event);
	/* Any other state: ignore the event */
}
2896
/* Feed a received reqseq/F-bit to the TX state machine so acked frames
 * are released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2903
/* Feed only a received F-bit to the TX state machine (no reqseq
 * processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2910
2911 /* Copy frame to all raw sockets on that connection */
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->l2cap.chan == chan)
			continue;

		/* Clone per channel; skip this channel on allocation
		 * failure rather than aborting the broadcast.
		 */
		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;
		/* recv takes ownership on success; free on rejection */
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}
}
2934
2935 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header (CID selected by link
 * type), command header, then @dlen bytes of @data.  Payload beyond
 * the connection MTU is chained as frag_list continuation fragments.
 *
 * Returns the skb or NULL on failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	/* MTU must at least fit the two headers */
	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* First fragment: whatever payload fits after the headers */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb and all fragments linked so far */
	kfree_skb(skb);
	return NULL;
}
3001
/* Parse one configuration option at *ptr, returning its total encoded
 * size and advancing *ptr past it.  1/2/4-byte values are decoded
 * little-endian into *val; other lengths return a pointer to the raw
 * value bytes cast into *val.  Caller is responsible for bounds
 * checking against the request length.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3035
/* Append one configuration option at *ptr and advance *ptr past it.
 * 1/2/4-byte values are encoded little-endian; other lengths copy
 * @len raw bytes from the pointer passed in @val.  If fewer than
 * @size bytes remain the option is silently dropped.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	/* Not enough room in the output buffer: drop the option */
	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: @val carries a pointer */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3068
/* Append an Extended Flow Specification option built from the
 * channel's local parameters.  Only ERTM and streaming modes carry an
 * EFS; other modes add nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		/* Streaming mode uses fixed best-effort parameters */
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs, size);
}
3099
/* Deferred-ack work handler: if any received frames are still
 * unacknowledged when the ack timer fires, send an RR/RNR now.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	/* Drop the channel reference held for this scheduled work */
	l2cap_chan_put(chan);
}
3119
/* Reset per-channel sequencing/reassembly state and, for ERTM mode,
 * initialize the receive/transmit state machines and the SREJ and
 * retransmission sequence lists.
 *
 * Returns 0 on success or a negative error from list allocation.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Streaming and other modes need no further ERTM state */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Undo the first allocation on failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3155
/* Pick the channel mode to use: keep a requested ERTM/streaming mode
 * only when the remote's feature mask supports it, otherwise fall back
 * to basic mode.
 */
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
{
	if ((mode == L2CAP_MODE_STREAMING || mode == L2CAP_MODE_ERTM) &&
	    l2cap_mode_supported(mode, remote_feat_mask))
		return mode;

	return L2CAP_MODE_BASIC;
}
3168
/* True if the remote advertised Extended Window Size support */
static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
{
	return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW);
}
3173
/* True if the remote advertised Extended Flow Specification support */
static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
{
	return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW);
}
3178
/* Fill in the default ERTM retransmission and monitor timeouts for an
 * outgoing RFC configuration option.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
}
3185
/* Choose the transmit window: enable the extended control field when
 * a window larger than the default is wanted and the remote supports
 * extended window sizes; otherwise clamp to the default window.
 */
static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
{
	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
	    __l2cap_ews_supported(chan->conn)) {
		/* use extended control field */
		set_bit(FLAG_EXT_CTRL, &chan->flags);
		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
	} else {
		chan->tx_win = min_t(u16, chan->tx_win,
				     L2CAP_DEFAULT_TX_WINDOW);
		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	}
	chan->ack_win = chan->tx_win;
}
3200
/* Derive an incoming MTU from the supported ACL packet types.  The
 * EDR bits in pkt_type are "do not use" flags, so a cleared bit means
 * the packet type is usable; checks run from smallest to largest so
 * the largest usable packet's payload size wins.
 */
static void l2cap_mtu_auto(struct l2cap_chan *chan)
{
	struct hci_conn *conn = chan->conn->hcon;

	chan->imtu = L2CAP_DEFAULT_MIN_MTU;

	/* The 2-DH1 packet has between 2 and 56 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_2DH1))
		chan->imtu = 54;

	/* The 3-DH1 packet has between 2 and 85 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_3DH1))
		chan->imtu = 83;

	/* The 2-DH3 packet has between 2 and 369 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_2DH3))
		chan->imtu = 367;

	/* The 3-DH3 packet has between 2 and 554 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_3DH3))
		chan->imtu = 552;

	/* The 2-DH5 packet has between 2 and 681 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_2DH5))
		chan->imtu = 679;

	/* The 3-DH5 packet has between 2 and 1023 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_3DH5))
		chan->imtu = 1021;
}
3243
/* Build an outgoing L2CAP Configure Request for @chan into @data
 * (at most @data_size bytes).  Returns the number of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection and EFS probing happen only on the very first
	 * config exchange; subsequent requests keep the chosen mode.
	 */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* CONF_STATE2_DEVICE: the mode is mandated by the device,
		 * so do not fall back to another mode.
		 */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		/* Pick the best mode the remote's feature mask supports */
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		/* imtu == 0 requests automatic MTU selection based on the
		 * supported ACL packet types.
		 */
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only send an explicit basic-mode RFC option if the peer
		 * understands at least one retransmission/streaming mode.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Cap the PDU size so a full PDU (extended header, SDU
		 * length and FCS included) still fits in the link MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		/* With extended control the full tx window goes in a
		 * separate EWS option (RFC txwin_size is capped above).
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		/* Streaming mode uses no window/retransmission parameters */
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3369
/* Parse the accumulated Configure Request from the peer (chan->conf_req,
 * chan->conf_len) and build the Configure Response into @data (at most
 * @data_size bytes).  Returns the response length, or -ECONNREFUSED when
 * the requested configuration cannot be negotiated at all.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: walk all options in the request.  Options with a
	 * wrong length are silently skipped (break with no action).
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* QoS option is accepted but ignored */
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Extended window size is not accepted here;
			 * refuse the connection outright.
			 */
			return -ECONNREFUSED;

		default:
			/* Hint options may be ignored; unknown non-hint
			 * options are echoed back with CONF_UNKNOWN.
			 */
			if (hint)
				break;
			result = L2CAP_CONF_UNKNOWN;
			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
			break;
		}
	}

	/* Mode renegotiation only happens on the first exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			/* Mode not mandated: adapt to the peer's request */
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* Mandated mode must match the peer's requested mode */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* After the second mode mismatch, give up */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			/* The peer's service type must match ours unless
			 * either side is "no traffic".
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* With EWS the window came in a separate option;
			 * normalize the RFC field we echo back.
			 */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Cap the peer's PDU size to what fits our MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (remote_efs &&
			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				/* Record the peer's flow spec and echo it */
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3589
/* Parse a Configure Response (@rsp, @len bytes) and build a follow-up
 * Configure Request into @data (at most @size bytes), adjusting channel
 * parameters to the values the peer proposed.  *@result may be updated.
 * Returns the request length, or -ECONNREFUSED on unacceptable terms.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			/* Clamp to the minimum MTU but flag the value as
			 * unacceptable so the peer knows.
			 */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* A device-mandated mode cannot be changed by the
			 * peer's response.
			 */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			/* Service type must be compatible with ours */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			/* Only honoured while the response is PENDING */
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* Basic mode cannot be upgraded by a response */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			/* Without extended control the RFC window bounds
			 * the ack window too.
			 */
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}
3707
/* Write a bare Configure Response header (no options) into @data and
 * return the number of bytes used.
 */
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
				u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;

	BT_DBG("chan %p", chan);

	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(flags);

	/* Length written so far: the header up to the options area */
	return (void *)rsp->data - data;
}
3722
__l2cap_le_connect_rsp_defer(struct l2cap_chan * chan)3723 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3724 {
3725 struct l2cap_le_conn_rsp rsp;
3726 struct l2cap_conn *conn = chan->conn;
3727
3728 BT_DBG("chan %p", chan);
3729
3730 rsp.dcid = cpu_to_le16(chan->scid);
3731 rsp.mtu = cpu_to_le16(chan->imtu);
3732 rsp.mps = cpu_to_le16(chan->mps);
3733 rsp.credits = cpu_to_le16(chan->rx_credits);
3734 rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3735
3736 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3737 &rsp);
3738 }
3739
/* Iterator callback: tally channels still pending accept into *data,
 * or mark the whole batch refused (-ECONNREFUSED) on any other state.
 */
static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
{
	int *result = data;

	/* Stop once refused, and skip channels we initiated ourselves */
	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	if (chan->state == BT_CONNECT2) {
		/* Channel still pending accept: count it */
		(*result)++;
	} else if (chan->state != BT_CONNECTED) {
		/* Neither connected nor pending accept: it was refused */
		*result = -ECONNREFUSED;
	}
}
3760
/* Scratch buffer for assembling an enhanced-credit connection response:
 * the fixed header followed by one CID slot per requested channel.
 */
struct l2cap_ecred_rsp_data {
	struct {
		struct l2cap_ecred_conn_rsp_hdr rsp;
		/* One slot per channel in the request, filled in order */
		__le16 scid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	/* Number of scid[] entries actually populated */
	int count;
};
3768
/* Iterator callback: fold one deferred channel into the pending enhanced
 * credit-based connection response in @data, or tear it down if the
 * aggregate result is a failure.
 */
static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_rsp_data *rsp = data;
	/* View the fixed header + scid[] scratch area through the
	 * flexible-array response type so dcid[] indexing lines up.
	 */
	struct l2cap_ecred_conn_rsp *rsp_flex =
		container_of(&rsp->pdu.rsp, struct l2cap_ecred_conn_rsp, hdr);

	/* Check if channel for outgoing connection or if it wasn't deferred
	 * since in those cases it must be skipped.
	 */
	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags) ||
	    !test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	/* Reset ident so only one response is sent */
	chan->ident = 0;

	/* Include all channels pending with the same ident */
	if (!rsp->pdu.rsp.result)
		rsp_flex->dcid[rsp->count++] = cpu_to_le16(chan->scid);
	else
		l2cap_chan_del(chan, ECONNRESET);
}
3791
/* Send the deferred enhanced credit-based connection response covering
 * every channel that shares @chan's command ident.  Does nothing while
 * any sibling channel is still pending accept.
 */
void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_rsp_data data;
	u16 id = chan->ident;
	int result = 0;

	/* ident == 0 means the response was already sent */
	if (!id)
		return;

	BT_DBG("chan %p id %d", chan, id);

	memset(&data, 0, sizeof(data));

	data.pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.rsp.mps     = cpu_to_le16(chan->mps);
	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);

	/* Verify that all channels are ready */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);

	/* result > 0: some channel still pending accept -- wait for it */
	if (result > 0)
		return;

	/* result < 0: at least one channel was refused */
	if (result < 0)
		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);

	/* Build response */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);

	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
		       &data.pdu);
}
3827
__l2cap_connect_rsp_defer(struct l2cap_chan * chan)3828 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3829 {
3830 struct l2cap_conn_rsp rsp;
3831 struct l2cap_conn *conn = chan->conn;
3832 u8 buf[128];
3833 u8 rsp_code;
3834
3835 rsp.scid = cpu_to_le16(chan->dcid);
3836 rsp.dcid = cpu_to_le16(chan->scid);
3837 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3838 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3839 rsp_code = L2CAP_CONN_RSP;
3840
3841 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3842
3843 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3844
3845 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3846 return;
3847
3848 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3849 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3850 chan->num_conf_req++;
3851 }
3852
/* Extract RFC and extended-window-size options from a successful
 * Configure Response (@rsp, @len bytes) and apply them to @chan.
 * Only meaningful for ERTM and streaming mode channels.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	/* Malformed options (wrong length) are skipped, keeping defaults */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* With extended control the EWS option carries the window,
		 * otherwise the RFC txwin_size does.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3908
/* Handle an incoming Command Reject.  A "not understood" reject of our
 * outstanding information request ends the feature-mask exchange and
 * lets pending channels proceed.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	/* Only act on "command not understood" rejects that match our
	 * in-flight info request ident.
	 */
	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD ||
	    !(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) ||
	    cmd->ident != conn->info_ident)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);

	return 0;
}
3933
/* Handle an incoming BR/EDR Connection Request: look up a listening
 * channel for the PSM, validate security and the requested source CID,
 * create the new channel and send the response (@rsp_code).
 */
static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
			  u8 *data, u8 rsp_code)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    (!hci_conn_check_link_mode(conn->hcon) ||
	    !l2cap_check_enc_key_size(conn->hcon))) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			/* Security ok: either defer to userspace or move
			 * straight to configuration.
			 */
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Authentication still in progress */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature-mask exchange not done yet: answer PENDING */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* No listening channel: nothing to unlock/put below */
	if (!pchan)
		return;

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);
}
4064
/* Thin dispatcher: validate the command length and hand the connection
 * request to l2cap_connect() with the BR/EDR response code.
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	/* Reject truncated requests before dispatching */
	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP);

	return 0;
}
4074
/* Handle a BR/EDR Connection Response to a request we sent: on success
 * record the peer's CID and start configuration; on PEND just mark the
 * channel; on any other result tear the channel down.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	/* A successful response must carry a CID from the dynamic range */
	if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	/* scid == 0 can happen for pending responses; fall back to the
	 * command ident to find the channel.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EBADSLT;
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EBADSLT;
	}

	/* Take a reference; the channel may be going away concurrently */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Refuse a dcid that is already in use on this link */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the initial config request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return err;
}
4152
set_default_fcs(struct l2cap_chan * chan)4153 static inline void set_default_fcs(struct l2cap_chan *chan)
4154 {
4155 /* FCS is enabled only in ERTM or streaming mode, if one or both
4156 * sides request it.
4157 */
4158 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4159 chan->fcs = L2CAP_FCS_NONE;
4160 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4161 chan->fcs = L2CAP_FCS_CRC16;
4162 }
4163
/* Mark local configuration complete and send a successful Configure
 * Response (scratch buffer @data) with the given @ident and @flags.
 */
static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
{
	struct l2cap_conn *conn = chan->conn;
	int rsp_len;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
	       flags);

	/* Local side is no longer pending; output config is finished */
	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	rsp_len = l2cap_build_conf_rsp(chan, data, L2CAP_CONF_SUCCESS, flags);
	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP, rsp_len, data);
}
4179
/* Send a Command Reject with reason "invalid CID", echoing the offending
 * source/destination CID pair back to the peer.
 */
static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
				   u16 scid, u16 dcid)
{
	struct l2cap_cmd_rej_cid rej = {
		.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID),
		.scid   = __cpu_to_le16(scid),
		.dcid   = __cpu_to_le16(dcid),
	};

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
4191
/* Handle an incoming Configure Request.  Requests may arrive in several
 * fragments (continuation flag); fragments accumulate in chan->conf_req
 * until complete, then the whole request is parsed and answered.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Locks the channel and takes a reference on success */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only valid in these states */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
		chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finalize the channel */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own config request has not gone out yet: send it now */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
	}

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4300
/* Handle an incoming Configure Response to a request we sent, and drive
 * the channel towards ready (or disconnect on unrecoverable results).
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Locks the channel and takes a reference on success */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		/* Adopt the parameters the peer accepted */
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		/* If we were pending too (EFS), finish the handshake now */
		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		/* Renegotiate with adjusted parameters, up to the retry cap */
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		/* Too many failures or an unrecognized result: give up */
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More response fragments to come */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: finalize the channel */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4407
/* Handle an L2CAP Disconnection Request: acknowledge it and tear the
 * addressed channel down.  Returns -EPROTO on a malformed command.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	struct l2cap_chan *chan;
	u16 dcid, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	scid = __le16_to_cpu(req->scid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The request carries the peer's view of the CIDs, so look up the
	 * channel by our local CID (the peer's dcid).  On success the
	 * channel is returned locked with a reference held.
	 */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	/* Echo the CIDs back, swapped into the peer's perspective */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	l2cap_chan_del(chan, ECONNRESET);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4446
/* Handle an L2CAP Disconnection Response: finish tearing down a channel
 * we previously asked to disconnect.  Returns -EPROTO on a malformed
 * command, 0 otherwise.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	struct l2cap_chan *chan;
	u16 dcid, scid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* On success the channel comes back locked and referenced */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	/* Only act on the response if we actually initiated a disconnect;
	 * a stray response for a channel in any other state is ignored.
	 */
	if (chan->state == BT_DISCONN) {
		l2cap_chan_del(chan, 0);
		chan->ops->close(chan);
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4483
/* Handle an L2CAP Information Request: answer feature-mask and
 * fixed-channel queries, reject everything else as not supported.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	switch (type) {
	case L2CAP_IT_FEAT_MASK: {
		u8 buf[8];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		u32 feat_mask = l2cap_feat_mask;

		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* ERTM, streaming mode and FCS are only advertised when
		 * ERTM has not been disabled via module parameter.
		 */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
		break;
	}
	case L2CAP_IT_FIXED_CHAN: {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* First octet carries the local fixed-channel bitmap; the
		 * remaining seven octets of the 64-bit field are zero.
		 */
		rsp->data[0] = conn->local_fixed_chan;
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
		break;
	}
	default: {
		struct l2cap_info_rsp rsp;

		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
		break;
	}
	}

	return 0;
}
4531
/* Handle an L2CAP Information Response.
 *
 * Completes the feature-discovery handshake started at connection
 * setup: a feature-mask answer may trigger a follow-up fixed-channel
 * query, and once discovery is done pending channels are started via
 * l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	/* A valid answer arrived; stop the discovery timeout */
	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused the query: give up on discovery and start
		 * the channels with whatever defaults we have.
		 */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Chain a fixed-channel query before declaring
			 * discovery complete.
			 */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		/* First octet of the response data is the remote's
		 * fixed-channel bitmap; discovery is now complete.
		 */
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4594
/* Handle an LE Connection Parameter Update Request.
 *
 * Only valid when we are the central: validate the requested range,
 * answer with accept/reject, and on accept push the new parameters to
 * the controller and notify the management interface.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	struct hci_conn *hcon = conn->hcon;
	u16 min, max, latency, to_multiplier;
	int err;

	/* Peripherals must not receive this request from us-as-peripheral */
	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	/* Validate the requested range against the spec limits */
	err = hci_check_conn_params(min, max, latency, to_multiplier);
	rsp.result = err ? cpu_to_le16(L2CAP_CONN_PARAM_REJECTED) :
			   cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);
	}

	return 0;
}
4644
/* Handle an LE Credit Based Connection Response.
 *
 * On success the channel becomes ready; on an authentication/encryption
 * failure we raise the security level and retry via SMP; any other
 * result tears the channel down.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* A successful response must carry a spec-valid MTU/MPS (>= 23)
	 * and a dcid inside the LE dynamic CID range.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	/* Match the response to the pending request by ident */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Refuse a dcid that is already bound to another channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Escalate security one step and let SMP retry */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

	return err;
}
4724
/* Dispatch a single BR/EDR signaling command to its handler.
 *
 * Handlers whose return value is not assigned to err are deliberately
 * best-effort: their failure must not trigger a command reject back to
 * the peer.  Unknown opcodes return -EINVAL so the caller sends a
 * Command Reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
4783
/* Handle an incoming LE Credit Based Connection Request.
 *
 * Validates PSM and CID ranges, checks security against the listening
 * channel, creates the new channel and answers with an LE Connect
 * Response (unless setup is deferred to userspace, in which case the
 * response is sent later).
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm = req->psm;
	dcid = 0;
	credits = 0;

	/* 23 octets is the minimum MTU/MPS for LE credit based channels */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* The link security must satisfy the listener's requirement */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	/* Ask the listener to spawn a channel for this connection */
	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	/* Initialize flow control with the peer's initial credits */
	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

	/* Deferred setup: the response is sent once userspace accepts */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
4920
/* Handle an LE Flow Control Credit packet: add the peer's credits to
 * the channel and resume any transmission that was blocked on credits.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt = (struct l2cap_le_credits *) data;
	struct l2cap_chan *chan;
	u16 cid, credits, headroom;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	cid = __le16_to_cpu(pkt->cid);
	credits = __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	/* On success the channel comes back locked and referenced */
	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	/* The running credit count must never exceed the 16-bit maximum;
	 * a peer pushing us past it is misbehaving, so disconnect.
	 */
	headroom = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > headroom) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		goto unlock;
	}

	chan->tx_credits += credits;

	/* Resume sending */
	l2cap_le_flowctl_send(chan);

	if (chan->tx_credits)
		chan->ops->resume(chan);

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4967
/* Handle an Enhanced Credit Based Connection Request (up to
 * L2CAP_ECRED_MAX_CID channels in one command).
 *
 * Each requested scid is handled independently: a per-channel failure
 * leaves its response dcid at 0 and records the (last) failure result,
 * while successfully created channels still come up.
 */
static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_req *req = (void *) data;
	/* Response PDU with a flexible dcid[] tail, allocated at max size */
	DEFINE_RAW_FLEX(struct l2cap_ecred_conn_rsp, pdu, dcid, L2CAP_ECRED_MAX_CID);
	struct l2cap_chan *chan, *pchan;
	u16 mtu, mps;
	__le16 psm;
	u8 result, len = 0;
	int i, num_scid;
	bool defer = false;

	if (!enable_ecred)
		return -EINVAL;

	/* Payload must be the fixed header plus a whole number of scids */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	if (num_scid > L2CAP_ECRED_MAX_CID) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
		goto response;
	}

	psm  = req->psm;

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);

	memset(pdu, 0, sizeof(*pdu));

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* Link security must satisfy the listener's requirement */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		goto unlock;
	}

	result = L2CAP_CR_LE_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid = __le16_to_cpu(req->scid[i]);

		BT_DBG("scid[%d] 0x%4.4x", i, scid);

		/* Slot defaults to 0 (refused) until the channel is added */
		pdu->dcid[i] = 0x0000;
		len += sizeof(*pdu->dcid);

		/* Check for valid dynamic CID range */
		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
			result = L2CAP_CR_LE_INVALID_SCID;
			continue;
		}

		/* Check if we already have channel with that dcid */
		if (__l2cap_get_chan_by_dcid(conn, scid)) {
			result = L2CAP_CR_LE_SCID_IN_USE;
			continue;
		}

		chan = pchan->ops->new_connection(pchan);
		if (!chan) {
			result = L2CAP_CR_LE_NO_MEM;
			continue;
		}

		bacpy(&chan->src, &conn->hcon->src);
		bacpy(&chan->dst, &conn->hcon->dst);
		chan->src_type = bdaddr_src_type(conn->hcon);
		chan->dst_type = bdaddr_dst_type(conn->hcon);
		chan->psm  = psm;
		chan->dcid = scid;
		chan->omtu = mtu;
		chan->remote_mps = mps;

		__l2cap_chan_add(conn, chan);

		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));

		/* Init response: local MTU/MPS/credits are shared by all
		 * channels in the command, so fill them only once.
		 */
		if (!pdu->credits) {
			pdu->mtu = cpu_to_le16(chan->imtu);
			pdu->mps = cpu_to_le16(chan->mps);
			pdu->credits = cpu_to_le16(chan->rx_credits);
		}

		pdu->dcid[i] = cpu_to_le16(chan->scid);

		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

		chan->ident = cmd->ident;
		chan->mode = L2CAP_MODE_EXT_FLOWCTL;

		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
			l2cap_state_change(chan, BT_CONNECT2);
			defer = true;
			chan->ops->defer(chan);
		} else {
			l2cap_chan_ready(chan);
		}
	}

unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

response:
	pdu->result = cpu_to_le16(result);

	/* Deferred setup: the response is sent once userspace accepts */
	if (defer)
		return 0;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
		       sizeof(*pdu) + len, pdu);

	return 0;
}
5118
/* Handle an Enhanced Credit Based Connection Response.
 *
 * Walks every local channel still pending under this command ident and
 * binds it to the next dcid in the response.  cmd_len and the running
 * index i track how many dcids remain; channels beyond the supplied
 * dcids are refused.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	/* Remaining bytes are the dcid array */
	cmd_len -= sizeof(*rsp);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		/* Only channels that are waiting on this exact command */
		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Escalate security one step and let SMP retry */
			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	return err;
}
5228
/* Handle an Enhanced Credit Based Reconfigure Request: apply the new
 * MTU/MPS to every addressed channel and answer with the result.
 *
 * Fix: the payload-length validity check used
 * "cmd_len - sizeof(*req) % sizeof(u16)".  Since % binds tighter than
 * -, that evaluates to cmd_len - 0, which is non-zero for every
 * well-formed command (cmd_len >= sizeof(*req) > 0), so all valid
 * reconfigure requests were rejected with INVALID_PARAMS.  The intended
 * check — and the one used by l2cap_ecred_conn_req() — is that the
 * trailing scid area is a whole number of 16-bit entries.
 */
static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					 u8 *data)
{
	struct l2cap_ecred_reconf_req *req = (void *) data;
	struct l2cap_ecred_reconf_rsp rsp;
	u16 mtu, mps, result;
	struct l2cap_chan *chan;
	int i, num_scid;

	if (!enable_ecred)
		return -EINVAL;

	/* Fixed header plus a whole number of 16-bit SCIDs */
	if (cmd_len < sizeof(*req) ||
	    (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto respond;
	}

	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);

	BT_DBG("mtu %u mps %u", mtu, mps);

	if (mtu < L2CAP_ECRED_MIN_MTU) {
		result = L2CAP_RECONF_INVALID_MTU;
		goto respond;
	}

	if (mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_RECONF_INVALID_MPS;
		goto respond;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);
	result = L2CAP_RECONF_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid;

		scid = __le16_to_cpu(req->scid[i]);
		if (!scid)
			return -EPROTO;

		/* The peer addresses channels by our local CID */
		chan = __l2cap_get_chan_by_dcid(conn, scid);
		if (!chan)
			continue;

		/* If the MTU value is decreased for any of the included
		 * channels, then the receiver shall disconnect all
		 * included channels.
		 */
		if (chan->omtu > mtu) {
			BT_ERR("chan %p decreased MTU %u -> %u", chan,
			       chan->omtu, mtu);
			result = L2CAP_RECONF_INVALID_MTU;
		}

		chan->omtu = mtu;
		chan->remote_mps = mps;
	}

respond:
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
		       &rsp);

	return 0;
}
5299
/* Handle an Enhanced Credit Based Reconfigure Response.
 *
 * A non-zero result means the reconfiguration failed; every channel
 * still bound to this command ident is torn down.
 *
 * Fix: BT_DBG was passed the raw little-endian field (rsp->result,
 * __le16) instead of the already-converted host-order value, producing
 * byte-swapped debug output on big-endian hosts and an endianness
 * annotation violation.
 */
static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					 u8 *data)
{
	struct l2cap_chan *chan, *tmp;
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	u16 result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	result = __le16_to_cpu(rsp->result);

	BT_DBG("result 0x%4.4x", result);

	/* Zero result means success: nothing to clean up */
	if (!result)
		return 0;

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		if (chan->ident != cmd->ident)
			continue;

		l2cap_chan_del(chan, ECONNRESET);
	}

	return 0;
}
5327
/* Handle an LE Command Reject: the peer refused one of our pending
 * requests, so tear down the channel that issued it (if any).
 */
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	/* Find the channel whose outstanding request carried this ident */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (chan) {
		/* Take a reference unless the channel is already dying */
		chan = l2cap_chan_hold_unless_zero(chan);
		if (chan) {
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			l2cap_chan_put(chan);
		}
	}

	return 0;
}
5354
/* Dispatch a single LE signaling command to its handler.
 *
 * Handlers whose return value is not assigned to err are deliberately
 * best-effort: their failure must not trigger a command reject back to
 * the peer.  Unknown opcodes return -EINVAL so the caller sends a
 * Command Reject.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_REQ:
		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_RSP:
		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_REQ:
		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_RSP:
		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5417
l2cap_le_sig_channel(struct l2cap_conn * conn,struct sk_buff * skb)5418 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5419 struct sk_buff *skb)
5420 {
5421 struct hci_conn *hcon = conn->hcon;
5422 struct l2cap_cmd_hdr *cmd;
5423 u16 len;
5424 int err;
5425
5426 if (hcon->type != LE_LINK)
5427 goto drop;
5428
5429 if (skb->len < L2CAP_CMD_HDR_SIZE)
5430 goto drop;
5431
5432 cmd = (void *) skb->data;
5433 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5434
5435 len = le16_to_cpu(cmd->len);
5436
5437 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5438
5439 if (len != skb->len || !cmd->ident) {
5440 BT_DBG("corrupted command");
5441 goto drop;
5442 }
5443
5444 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5445 if (err) {
5446 struct l2cap_cmd_rej_unk rej;
5447
5448 BT_ERR("Wrong link type (%d)", err);
5449
5450 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5451 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5452 sizeof(rej), &rej);
5453 }
5454
5455 drop:
5456 kfree_skb(skb);
5457 }
5458
/* Send a "command not understood" reject for the given ident */
static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
{
	struct l2cap_cmd_rej_unk rej = {
		.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD),
	};

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
5466
/* Process the BR/EDR signaling channel.
 *
 * A single ACL frame may carry several signaling commands back to back;
 * iterate over them, rejecting any that are malformed.  Consumes the
 * skb in all cases.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	/* Mirror raw signaling traffic to any raw sockets first */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	/* Walk every complete command header in the frame */
	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		/* Payload overrunning the frame, or the reserved ident 0 */
		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			l2cap_sig_send_rej(conn, cmd->ident);
			/* Skip whatever payload is present and keep going */
			skb_pull(skb, len > skb->len ? skb->len : len);
			continue;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			BT_ERR("Wrong link type (%d)", err);
			l2cap_sig_send_rej(conn, cmd->ident);
		}

		/* Advance past this command's payload */
		skb_pull(skb, len);
	}

	/* Trailing bytes too short for a command header */
	if (skb->len > 0) {
		BT_DBG("corrupted command");
		l2cap_sig_send_rej(conn, 0);
	}

drop:
	kfree_skb(skb);
}
5514
/* Verify the CRC16 FCS trailer of a received ERTM/streaming PDU.
 * Returns 0 when the FCS matches or FCS is not in use, -EBADMSG on a
 * mismatch.  When CRC16 is in use, the FCS bytes are trimmed off the
 * skb as a side effect.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	/* The CRC also covers the L2CAP header, whose size depends on
	 * whether extended control fields are in use.
	 */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS first: skb->data + skb->len then points at
		 * the received FCS bytes, which are still present in the
		 * linear buffer.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		/* Recompute over the header (which sits just before
		 * skb->data at this point) plus the payload.
		 */
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5535
/* Answer the peer with the F-bit set: send an RNR while locally busy,
 * otherwise flush pending I-frames (which can carry the F-bit) and
 * fall back to an RR if the F-bit still hasn't gone out.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote busy just cleared: restart the retransmission timer if
	 * frames are still waiting to be acknowledged.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5569
/* Append @new_frag to @skb's frag_list, using *last_frag as a tail
 * cursor so the list never has to be walked.  The aggregate skb's
 * length and truesize accounting are updated to include the fragment.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5588
/* Feed one I-frame payload into SDU reassembly according to its SAR
 * bits.  Unsegmented SDUs are delivered immediately; segmented ones
 * accumulate in chan->sdu until the announced length is reached.
 * Ownership of @skb passes to the channel whenever it is stored or
 * delivered; on error both the frame and any partial SDU are freed.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* Unsegmented SDU must not arrive mid-reassembly */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* A new SDU cannot start while one is being assembled */
		if (chan->sdu)
			break;

		/* Need at least the 2-byte SDU length field */
		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start segment that already holds the full SDU is
		 * invalid (err stays -EINVAL).
		 */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb is now owned by chan->sdu; don't free it below */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		/* Continuation without a start frame is invalid */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation must not reach or exceed the announced
		 * SDU length; only an END frame may complete it.
		 */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		/* End without a start frame is invalid */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Total length must match the announced SDU length */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop the offending frame and any partial SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5673
/* Resegment queued outgoing data after the connection MTU changes
 * (called from the channel-move completion paths).  Not implemented;
 * always reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5679
l2cap_chan_busy(struct l2cap_chan * chan,int busy)5680 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5681 {
5682 u8 event;
5683
5684 if (chan->mode != L2CAP_MODE_ERTM)
5685 return;
5686
5687 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5688 l2cap_tx(chan, NULL, NULL, event);
5689 }
5690
/* Drain in-sequence frames from the SREJ hold queue into SDU
 * reassembly, stopping at the first gap, on local busy, or on a
 * reassembly error.  Once the queue is empty the channel returns to
 * the normal RECV state and acks the peer.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5724
/* Handle a received SREJ S-frame: retransmit the single I-frame the
 * peer asked for, honoring the poll/final bits and the SREJ_ACT flag
 * that suppresses a duplicate retransmission when the F-bit reply to
 * our own poll arrives.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* An SREJ for a frame we never sent is a protocol violation */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* Poll demands an F-bit response; retransmit and flush */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only if this F-bit answers
			 * the SREJ we already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5782
/* Handle a received REJ S-frame: retransmit everything from the
 * peer's reqseq onward, using the REJ_ACT flag to avoid a duplicate
 * retransmission when the F-bit reply to our own poll arrives.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A REJ for a frame we never sent is a protocol violation */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Skip retransmission if it was already triggered when
		 * the REJ was first processed (REJ_ACT set).
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5819
/* Classify a received I-frame's txseq relative to the expected
 * sequence number, the last acked sequence, and the tx window.
 * Returns one of the L2CAP_TXSEQ_* dispositions used by the receive
 * state machines (expected, duplicate, unexpected/gap, or invalid).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	/* In SREJ_SENT state, first resolve the frame against the list
	 * of sequence numbers we have explicitly requested.
	 */
	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq earlier than expected_tx_seq: already received */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
5905
/* ERTM receive state machine handler for the normal RECV state.
 * Classifies and consumes incoming I-frames (delivering in-sequence
 * data, or entering SREJ_SENT on a gap) and processes RR/RNR/REJ/SREJ
 * supervisory events.  Frees @skb unless ownership was taken.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	struct l2cap_ctrl local_control;
	int err = 0;
	/* Set when skb has been queued or delivered and must not be
	 * freed at the bottom of this function.
	 */
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu may free skb, hence invalidate
			 * control, so make a copy in advance to use it after
			 * l2cap_reassemble_sdu returns and to avoid the race
			 * condition, for example:
			 *
			 * The current thread calls:
			 *   l2cap_reassemble_sdu
			 *     chan->ops->recv == l2cap_sock_recv_cb
			 *       __sock_queue_rcv_skb
			 * Another thread calls:
			 *   bt_sock_recvmsg
			 *     skb_recv_datagram
			 *     skb_free_datagram
			 * Then the current thread tries to access control, but
			 * it was freed by skb_free_datagram.
			 */
			local_control = *control;
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (local_control.final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					local_control.final = 0;
					l2cap_retransmit_all(chan, &local_control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; only the ack info is useful */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Retransmit only if not already done when the
			 * REJ was first handled (REJ_ACT set).
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6057
/* ERTM receive state machine handler for the SREJ_SENT state, i.e.
 * while we are waiting for retransmission of one or more missing
 * frames.  Incoming I-frames are parked in the srej_q; in-sequence
 * runs are flushed via l2cap_rx_queued_iframes().  Frees @skb unless
 * ownership was taken.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	/* Set when skb has been queued and must not be freed below */
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* Head of the SREJ list arrived; drop it from the
			 * list and try to flush queued frames.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Retransmit only if not already done when the
			 * REJ was first handled (REJ_ACT set).
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Poll demands an F-bit response; resend the last
			 * outstanding SREJ with F set.
			 */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6200
/* Complete a channel move: return to the default RECV state, refresh
 * the connection MTU from the underlying HCI link, and resegment any
 * queued outgoing data.
 */
static int l2cap_finish_move(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->conn->mtu = chan->conn->hcon->mtu;

	return l2cap_resegment(chan);
}
6210
/* ERTM receive handler for the WAIT_P state (during a channel move):
 * only a frame with the P-bit set is accepted.  The tx side is rewound
 * to the peer's reqseq, the move is finished, an F-bit response is
 * sent, and any supervisory event is re-dispatched to RECV handling.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	/* Anything without the poll bit is a protocol error here */
	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames are not expected while waiting for a poll */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6248
/* ERTM receive handler for the WAIT_F state (during a channel move):
 * only a frame with the F-bit set is accepted.  The tx side is rewound
 * to the peer's reqseq, queued data is resegmented for the refreshed
 * MTU, and the frame is then processed as in the RECV state.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	/* Anything without the final bit is a protocol error here */
	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;
	chan->conn->mtu = chan->conn->hcon->mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6282
__valid_reqseq(struct l2cap_chan * chan,u16 reqseq)6283 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6284 {
6285 /* Make sure reqseq is for a packet that has been sent but not acked */
6286 u16 unacked;
6287
6288 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6289 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6290 }
6291
/* Top-level ERTM receive dispatch.  Validates the acknowledgment
 * (reqseq) carried by the frame, then hands the frame to the handler
 * for the channel's current rx_state.  An invalid reqseq is a
 * protocol violation and disconnects the channel.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (__valid_reqseq(chan, control->reqseq)) {
		switch (chan->rx_state) {
		case L2CAP_RX_STATE_RECV:
			err = l2cap_rx_state_recv(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_SREJ_SENT:
			err = l2cap_rx_state_srej_sent(chan, control, skb,
						       event);
			break;
		case L2CAP_RX_STATE_WAIT_P:
			err = l2cap_rx_state_wait_p(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_WAIT_F:
			err = l2cap_rx_state_wait_f(chan, control, skb, event);
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		/* Note: closing parenthesis added to the debug message,
		 * which previously read "(... %d" with no ")".
		 */
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return err;
}
6328
/* Streaming-mode receive: only the exactly-expected txseq is
 * reassembled; anything else discards both the frame and any partial
 * SDU (streaming mode has no retransmission).  Always returns 0.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
	 * the txseq field in advance to use it after l2cap_reassemble_sdu
	 * returns and to avoid the race condition, for example:
	 *
	 * The current thread calls:
	 *   l2cap_reassemble_sdu
	 *     chan->ops->recv == l2cap_sock_recv_cb
	 *       __sock_queue_rcv_skb
	 * Another thread calls:
	 *   bt_sock_recvmsg
	 *     skb_recv_datagram
	 *     skb_free_datagram
	 * Then the current thread tries to access control, but it was freed by
	 * skb_free_datagram.
	 */
	u16 txseq = control->txseq;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out-of-sequence frame: abandon any partial SDU */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize on the received sequence number */
	chan->last_acked_seq = txseq;
	chan->expected_tx_seq = __next_seq(chan, txseq);

	return 0;
}
6380
/* Entry point for an ERTM or streaming-mode data PDU.  Unpacks the
 * control field, validates FCS, length and F/P bit combinations, then
 * routes the frame into l2cap_rx() (ERTM) or l2cap_stream_rx()
 * (streaming).  Consumes the skb on all paths; always returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Account for the SDU-length field of a start segment and the
	 * (already trimmed) FCS when checking against the MPS.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	/* Give the socket layer a chance to filter the frame */
	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps control->super (RR/REJ/RNR/SREJ) to rx events */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6473
l2cap_chan_le_send_credits(struct l2cap_chan * chan)6474 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6475 {
6476 struct l2cap_conn *conn = chan->conn;
6477 struct l2cap_le_credits pkt;
6478 u16 return_credits = l2cap_le_rx_credits(chan);
6479
6480 if (chan->rx_credits >= return_credits)
6481 return;
6482
6483 return_credits -= chan->rx_credits;
6484
6485 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6486
6487 chan->rx_credits += return_credits;
6488
6489 pkt.cid = cpu_to_le16(chan->scid);
6490 pkt.credits = cpu_to_le16(return_credits);
6491
6492 chan->ident = l2cap_get_ident(conn);
6493
6494 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6495 }
6496
/* Record the receive buffer space the upper layer currently has
 * available, and replenish the peer's credits if the channel is up.
 */
void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail)
{
	/* No change, nothing to do */
	if (rx_avail == chan->rx_avail)
		return;

	BT_DBG("chan %p has %zd bytes avail for rx", chan, rx_avail);

	chan->rx_avail = rx_avail;

	/* Credits can only be issued on a connected channel */
	if (chan->state == BT_CONNECTED)
		l2cap_chan_le_send_credits(chan);
}
6509
/* Deliver a complete LE/ECRED SDU to the upper layer, then replenish
 * the peer's credits.  Delivery failure (with bounded rx_avail)
 * disconnects the channel since LE L2CAP data must not be dropped.
 */
static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);

	/* Wait recv to confirm reception before updating the credits */
	err = chan->ops->recv(chan, skb);

	/* rx_avail == -1 means unbounded receive space; only treat a
	 * queueing failure as fatal when space is actually tracked.
	 */
	if (err < 0 && chan->rx_avail != -1) {
		BT_ERR("Queueing received LE L2CAP data failed");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return err;
	}

	/* Update credits whenever an SDU is received */
	l2cap_chan_le_send_credits(chan);

	return err;
}
6530
/* Receive one LE/ECRED (credit-based flow control) PDU: enforce the
 * credit count, then either start a new SDU (first PDU carries a
 * 2-byte SDU length), deliver a complete single-PDU SDU, or append a
 * fragment to the SDU in progress.  The skb is always consumed here,
 * so 0 is returned even on internal errors to prevent a double free
 * by the caller.
 */
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	/* Peer sent data without holding any credits */
	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	/* Each received PDU consumes one credit */
	chan->rx_credits--;
	BT_DBG("chan %p: rx_credits %u -> %u",
	       chan, chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU: starts with the SDU length field */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* SDU fits entirely in this PDU: deliver immediately */
		if (skb->len == sdu_len)
			return l2cap_ecred_recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* skb is now owned by chan->sdu; don't free it below */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6635
/* Route an incoming data PDU on connection-oriented CID @cid to its
 * channel's mode-specific receive handler.  Takes (and releases) the
 * channel lock/reference via l2cap_get_chan_by_scid().  Consumes the
 * skb on all paths.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Credit-based modes share the same receive path */
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() consumes the skb itself */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
6701
/* Deliver a connectionless (G-frame) PDU to a channel bound to @psm.
 *
 * Only meaningful on BR/EDR (ACL) links.  Takes ownership of @skb:
 * it is either consumed by chan->ops->recv() or freed here.  The
 * channel returned by l2cap_global_chan_by_psm() carries a reference
 * that is dropped before returning.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	l2cap_chan_lock(chan);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	/* Drop payloads larger than the channel's incoming MTU */
	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* recv() returning 0 means the skb was consumed */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6742
/* Dispatch one complete L2CAP frame to the proper channel handler.
 *
 * Frames that arrive before the HCI link is fully connected are
 * parked on conn->pending_rx and replayed by process_pending_rx().
 * Takes ownership of @skb in all cases.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* The header length must match the payload actually received */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry the PSM after the header */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6797
process_pending_rx(struct work_struct * work)6798 static void process_pending_rx(struct work_struct *work)
6799 {
6800 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6801 pending_rx_work);
6802 struct sk_buff *skb;
6803
6804 BT_DBG("");
6805
6806 mutex_lock(&conn->lock);
6807
6808 while ((skb = skb_dequeue(&conn->pending_rx)))
6809 l2cap_recv_frame(conn, skb);
6810
6811 mutex_unlock(&conn->lock);
6812 }
6813
/* Get or create the l2cap_conn attached to @hcon.
 *
 * Returns the existing conn if one is already attached; otherwise
 * allocates a new one (taking an hci_conn reference and creating an
 * HCI channel) and initializes its locks, lists and work items.
 * Returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	conn->mtu = hcon->mtu;
	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* Advertise the BR/EDR SMP fixed channel only when LE is enabled
	 * and BR/EDR secure connections (or forced BR/EDR SMP) are
	 * available.
	 */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_DELAYED_WORK(&conn->id_addr_timer, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
6865
is_valid_psm(u16 psm,u8 dst_type)6866 static bool is_valid_psm(u16 psm, u8 dst_type)
6867 {
6868 if (!psm)
6869 return false;
6870
6871 if (bdaddr_type_is_le(dst_type))
6872 return (psm <= 0x00ff);
6873
6874 /* PSM must be odd and lsb of upper byte must be 0 */
6875 return ((psm & 0x0101) == 0x0001);
6876 }
6877
/* Context passed to l2cap_chan_list() by l2cap_chan_connect() to
 * count channels sharing the same owner PID and PSM as @chan
 * (see l2cap_chan_by_pid()).
 */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* channel being connected (excluded) */
	struct pid *pid;		/* owner PID of @chan */
	int count;			/* matching channels found so far */
};
6883
l2cap_chan_by_pid(struct l2cap_chan * chan,void * data)6884 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
6885 {
6886 struct l2cap_chan_data *d = data;
6887 struct pid *pid;
6888
6889 if (chan == d->chan)
6890 return;
6891
6892 if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
6893 return;
6894
6895 pid = chan->ops->get_peer_pid(chan);
6896
6897 /* Only count deferred channels with the same PID/PSM */
6898 if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
6899 chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
6900 return;
6901
6902 d->count++;
6903 }
6904
/* Initiate an outgoing L2CAP channel connection to @dst.
 *
 * Validates @psm/@cid against the channel type and mode, resolves the
 * route, creates (or reuses) the underlying ACL/LE link, binds the
 * channel to the resulting l2cap_conn and starts the connect state
 * machine.  Returns 0 on success (including when a connect is already
 * in progress) or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type, u16 timeout)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* Raw channels may use any PSM/CID; everything else needs a
	 * valid PSM or an explicit CID.
	 */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Reject modes disabled via module parameters
	 * (disable_ertm / enable_ecred) or unknown modes.
	 */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising, connect directly; otherwise use the
		 * passive-scan based connection establishment.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type, false,
					      chan->sec_level, timeout,
					      HCI_ROLE_SLAVE, 0, 0);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level, timeout,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN, timeout);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* Enforce the per owner/PSM limit on simultaneously connecting
	 * enhanced-credit channels (L2CAP_ECRED_CONN_SCID_MAX).
	 */
	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check if there isn't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->lock);
	l2cap_chan_lock(chan);

	/* A requested fixed CID must not already be in use on this
	 * connection.
	 */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* If the link is already up, kick off channel setup right away
	 * instead of waiting for the connect_cfm callback.
	 */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7089
l2cap_ecred_reconfigure(struct l2cap_chan * chan)7090 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
7091 {
7092 struct l2cap_conn *conn = chan->conn;
7093 DEFINE_RAW_FLEX(struct l2cap_ecred_reconf_req, pdu, scid, 1);
7094
7095 pdu->mtu = cpu_to_le16(chan->imtu);
7096 pdu->mps = cpu_to_le16(chan->mps);
7097 pdu->scid[0] = cpu_to_le16(chan->scid);
7098
7099 chan->ident = l2cap_get_ident(conn);
7100
7101 l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
7102 sizeof(pdu), &pdu);
7103 }
7104
/* Reconfigure the incoming MTU of an enhanced-credit channel and
 * notify the peer.  The MTU must not shrink below the current value;
 * returns 0 on success or -EINVAL otherwise.
 */
int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
{
	if (mtu < chan->imtu)
		return -EINVAL;

	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);

	chan->imtu = mtu;
	l2cap_ecred_reconfigure(chan);

	return 0;
}
7118
7119 /* ---- L2CAP interface with lower layer (HCI) ---- */
7120
/* Lower-layer hook: decide whether to accept an incoming ACL
 * connection from @bdaddr based on listening L2CAP channels.
 *
 * Returns an HCI_LM_* mask; listeners bound to the local adapter
 * address take precedence over wildcard (BDADDR_ANY) listeners.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm_exact = 0, lm_any = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		int mode;

		if (c->state != BT_LISTEN)
			continue;

		mode = HCI_LM_ACCEPT;
		if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
			mode |= HCI_LM_MASTER;

		if (!bacmp(&c->src, &hdev->bdaddr)) {
			lm_exact |= mode;
			exact++;
		} else if (!bacmp(&c->src, BDADDR_ANY)) {
			lm_any |= mode;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm_exact : lm_any;
}
7149
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 *
 * The returned channel carries a reference taken with
 * l2cap_chan_hold_unless_zero(); the caller must drop it with
 * l2cap_chan_put().
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	/* Resume right after @c, or start at the head of the list */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Accept an exact local-address match or a wildcard bind */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* May return NULL if the channel is being destroyed */
		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7185
/* HCI callback: an ACL/LE link setup attempt completed.
 *
 * On failure, all L2CAP state for the link is torn down.  On success,
 * an l2cap_conn is attached (if not already present), every fixed
 * channel listener is offered a new channel, and the connection is
 * marked ready.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		/* Clone the listener and bind the clone to this link */
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Fetch the next listener before dropping our reference */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7246
l2cap_disconn_ind(struct hci_conn * hcon)7247 int l2cap_disconn_ind(struct hci_conn *hcon)
7248 {
7249 struct l2cap_conn *conn = hcon->l2cap_data;
7250
7251 BT_DBG("hcon %p", hcon);
7252
7253 if (!conn)
7254 return HCI_ERROR_REMOTE_USER_TERM;
7255 return conn->disc_reason;
7256 }
7257
/* Lower-layer hook: tear down L2CAP state for a disconnected ACL/LE
 * link, translating the HCI reason code into an errno.
 */
static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	if (hcon->type == ACL_LINK || hcon->type == LE_LINK) {
		BT_DBG("hcon %p reason %d", hcon, reason);
		l2cap_conn_del(hcon, bt_to_errno(reason));
	}
}
7267
/* React to an encryption change on a connection-oriented channel:
 * losing encryption arms a disconnect timer (medium security) or
 * closes the channel outright (high/FIPS), while enabling encryption
 * clears the pending timer for medium-security channels.
 */
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return;

	switch (chan->sec_level) {
	case BT_SECURITY_MEDIUM:
		if (encrypt == 0x00)
			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
		else
			__clear_chan_timer(chan);
		break;
	case BT_SECURITY_HIGH:
	case BT_SECURITY_FIPS:
		if (encrypt == 0x00)
			l2cap_chan_close(chan, ECONNREFUSED);
		break;
	default:
		break;
	}
}
7284
/* HCI callback: the authentication/encryption state of @hcon changed.
 *
 * Walks every channel on the connection and advances (or aborts) its
 * state machine according to the new security status: resumes
 * established channels, continues pending outgoing connects, or
 * answers deferred incoming connect requests.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* Successful encryption upgrades the channel security */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Established channels just resume; l2cap_check_encryption()
		 * may arm or clear the encryption timeout.
		 */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing connect: proceed only on success with a
			 * sufficiently long encryption key.
			 */
			if (!status && l2cap_check_enc_key_size(hcon))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			/* Incoming BR/EDR connect that was waiting for
			 * security: send the deferred L2CAP_CONN_RSP now.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On success, immediately follow up with our own
			 * configuration request.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
7371
/* Append fragment into frame respecting the maximum len of rx_skb.
 *
 * On the first fragment the reassembly buffer (conn->rx_skb) is
 * allocated for @len bytes and conn->rx_len initialized.  Returns the
 * number of bytes copied from @skb (which are also pulled off @skb
 * and subtracted from conn->rx_len), or -ENOMEM.
 */
static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
			   u16 len)
{
	if (!conn->rx_skb) {
		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			return -ENOMEM;
		/* Init rx_len */
		conn->rx_len = len;
	}

	/* Copy as much as the rx_skb can hold */
	len = min_t(u16, len, skb->len);
	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
	/* Consume the copied bytes and track how many are still expected */
	skb_pull(skb, len);
	conn->rx_len -= len;

	return len;
}
7393
/* Complete the 2-byte L2CAP length field of a partially received
 * frame and (re)size the reassembly buffer to the full frame length.
 *
 * Returns the number of bytes consumed from @skb, or a negative errno
 * if the reassembly buffer could not be (re)allocated.
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to received all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in order to
	 * fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}
7428
l2cap_recv_reset(struct l2cap_conn * conn)7429 static void l2cap_recv_reset(struct l2cap_conn *conn)
7430 {
7431 kfree_skb(conn->rx_skb);
7432 conn->rx_skb = NULL;
7433 conn->rx_len = 0;
7434 }
7435
l2cap_conn_hold_unless_zero(struct l2cap_conn * c)7436 struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *c)
7437 {
7438 if (!c)
7439 return NULL;
7440
7441 BT_DBG("conn %p orig refcnt %u", c, kref_read(&c->ref));
7442
7443 if (!kref_get_unless_zero(&c->ref))
7444 return NULL;
7445
7446 return c;
7447 }
7448
/* HCI RX entry point: reassemble ACL fragments into complete L2CAP
 * frames and feed them to l2cap_recv_frame().
 *
 * Takes ownership of @skb.  Reassembly state (conn->rx_skb /
 * conn->rx_len) is protected by conn->lock.
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn;
	int len;

	/* Lock hdev to access l2cap_data to avoid race with l2cap_conn_del */
	hci_dev_lock(hcon->hdev);

	conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	/* Guard against a conn that is concurrently being destroyed */
	conn = l2cap_conn_hold_unless_zero(conn);

	hci_dev_unlock(hcon->hdev);

	if (!conn) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);

	mutex_lock(&conn->lock);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start frame while reassembling means the previous frame
		 * was truncated: drop it and mark the link unreliable.
		 */
		if (conn->rx_skb) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment may not contain the L2CAP length so just
		 * copy the initial byte when that happens and use conn->mtu as
		 * expected length.
		 */
		if (skb->len < L2CAP_LEN_SIZE) {
			l2cap_recv_frag(conn, skb, conn->mtu);
			break;
		}

		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			goto unlock;
		}

		BT_DBG("Start: total len %d, frag len %u", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %u, expected len %d)",
			       skb->len, len);
			/* PTS test cases L2CAP/COS/CED/BI-14-C and BI-15-C
			 * (Multiple Signaling Command in one PDU, Data
			 * Truncated, BR/EDR) send a C-frame to the IUT with
			 * PDU Length set to 8 and Channel ID set to the
			 * correct signaling channel for the logical link.
			 * The Information payload contains one L2CAP_ECHO_REQ
			 * packet with Data Length set to 0 with 0 octets of
			 * echo data and one invalid command packet due to
			 * data truncated in PDU but present in HCI packet.
			 *
			 * Shorter the socket buffer to the PDU length to
			 * allow to process valid commands from the PDU before
			 * setting the socket unreliable.
			 */
			skb->len = len;
			l2cap_recv_frame(conn, skb);
			l2cap_conn_unreliable(conn, ECOMM);
			goto unlock;
		}

		/* Append fragment into frame (with header) */
		if (l2cap_recv_frag(conn, skb, len) < 0)
			goto drop;

		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);

		if (!conn->rx_skb) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Complete the L2CAP length if it has not been read */
		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
			if (l2cap_recv_len(conn, skb) < 0) {
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}

			/* Header still could not be read just continue */
			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
				break;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %u, expected %u)",
			       skb->len, conn->rx_len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		l2cap_recv_frag(conn, skb, skb->len);

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
unlock:
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
}
7583
/* Callbacks registered with the HCI core via hci_register_cb() in
 * l2cap_init().
 */
static struct hci_cb l2cap_cb = {
	.name = "L2CAP",
	.connect_cfm = l2cap_connect_cfm,
	.disconn_cfm = l2cap_disconn_cfm,
	.security_cfm = l2cap_security_cfm,
};
7590
l2cap_debugfs_show(struct seq_file * f,void * p)7591 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7592 {
7593 struct l2cap_chan *c;
7594
7595 read_lock(&chan_list_lock);
7596
7597 list_for_each_entry(c, &chan_list, global_l) {
7598 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7599 &c->src, c->src_type, &c->dst, c->dst_type,
7600 c->state, __le16_to_cpu(c->psm),
7601 c->scid, c->dcid, c->imtu, c->omtu,
7602 c->sec_level, c->mode);
7603 }
7604
7605 read_unlock(&chan_list_lock);
7606
7607 return 0;
7608 }
7609
/* Generates l2cap_debugfs_fops from l2cap_debugfs_show() */
DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

/* debugfs entry created in l2cap_init(), removed in l2cap_exit() */
static struct dentry *l2cap_debugfs;
7613
l2cap_init(void)7614 int __init l2cap_init(void)
7615 {
7616 int err;
7617
7618 err = l2cap_init_sockets();
7619 if (err < 0)
7620 return err;
7621
7622 hci_register_cb(&l2cap_cb);
7623
7624 if (IS_ERR_OR_NULL(bt_debugfs))
7625 return 0;
7626
7627 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7628 NULL, &l2cap_debugfs_fops);
7629
7630 return 0;
7631 }
7632
/* Module exit: undo l2cap_init() in reverse order. */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
7639
/* Runtime-tunable module parameters (writable via module sysfs) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
7645