1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "smp.h"
37 #include "mgmt_util.h"
38 #include "mgmt_config.h"
39 #include "msft.h"
40 #include "eir.h"
41 #include "aosp.h"
42
43 #define MGMT_VERSION 1
44 #define MGMT_REVISION 23
45
/* Opcodes a trusted management socket is allowed to issue; this table
 * is reported verbatim by read_commands() for HCI_SOCK_TRUSTED sockets.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
};
136
/* Events a trusted management socket may receive; reported verbatim by
 * read_commands() for HCI_SOCK_TRUSTED sockets.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
183
/* Read-only subset of opcodes permitted to untrusted (unprivileged)
 * management sockets; reported by read_commands() when the socket lacks
 * HCI_SOCK_TRUSTED.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
196
/* Subset of events delivered to untrusted management sockets; reported
 * by read_commands() when the socket lacks HCI_SOCK_TRUSTED.
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
211
/* 2 seconds expressed in jiffies — presumably the delay before the
 * service cache is flushed (see the HCI_SERVICE_CACHE handling below);
 * TODO(review): confirm against the users of this macro.
 */
#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)

/* A 128-bit all-zero key value. */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
216
/* HCI to MGMT error code conversion table, indexed directly by the HCI
 * status code. Codes beyond the end of the table are mapped to
 * MGMT_STATUS_FAILED by mgmt_status().
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
284
mgmt_errno_status(int err)285 static u8 mgmt_errno_status(int err)
286 {
287 switch (err) {
288 case 0:
289 return MGMT_STATUS_SUCCESS;
290 case -EPERM:
291 return MGMT_STATUS_REJECTED;
292 case -EINVAL:
293 return MGMT_STATUS_INVALID_PARAMS;
294 case -EOPNOTSUPP:
295 return MGMT_STATUS_NOT_SUPPORTED;
296 case -EBUSY:
297 return MGMT_STATUS_BUSY;
298 case -ETIMEDOUT:
299 return MGMT_STATUS_AUTH_FAILED;
300 case -ENOMEM:
301 return MGMT_STATUS_NO_RESOURCES;
302 case -EISCONN:
303 return MGMT_STATUS_ALREADY_CONNECTED;
304 case -ENOTCONN:
305 return MGMT_STATUS_DISCONNECTED;
306 }
307
308 return MGMT_STATUS_FAILED;
309 }
310
mgmt_status(int err)311 static u8 mgmt_status(int err)
312 {
313 if (err < 0)
314 return mgmt_errno_status(err);
315
316 if (err < ARRAY_SIZE(mgmt_status_table))
317 return mgmt_status_table[err];
318
319 return MGMT_STATUS_FAILED;
320 }
321
/* Send an index-related event on the control channel, filtered by the
 * given socket flag; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
328
/* Send an event on the control channel, filtered by the given socket
 * flag and skipping @skip_sk (typically the originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
335
/* Send an event on the control channel to trusted sockets only,
 * skipping @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
342
/* Send a pre-built event skb on the control channel to trusted sockets
 * only, skipping @skip_sk.
 */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
348
/* Map an MGMT address type onto the HCI LE address type; anything
 * other than BDADDR_LE_PUBLIC is treated as a random address.
 */
static u8 le_addr_type(u8 mgmt_addr_type)
{
	return mgmt_addr_type == BDADDR_LE_PUBLIC ? ADDR_LE_DEV_PUBLIC :
						    ADDR_LE_DEV_RANDOM;
}
356
mgmt_fill_version_info(void * ver)357 void mgmt_fill_version_info(void *ver)
358 {
359 struct mgmt_rp_read_version *rp = ver;
360
361 rp->version = MGMT_VERSION;
362 rp->revision = cpu_to_le16(MGMT_REVISION);
363 }
364
/* Handler for MGMT_OP_READ_VERSION: reply with the management
 * interface version and revision.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version ver;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&ver);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &ver, sizeof(ver));
}
377
/* Handler for MGMT_OP_READ_COMMANDS: reply with the list of supported
 * opcodes and events. Trusted sockets get the full tables, untrusted
 * ones only the read-only subsets.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	bool trusted;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Snapshot the trust level once so that the tables used for
	 * sizing the reply below are guaranteed to be the same ones
	 * used to fill it (the flag was previously re-tested).
	 */
	trusted = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED);

	if (trusted) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (trusted) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
429
/* Handler for MGMT_OP_READ_INDEX_LIST: reply with the ids of all
 * configured controllers. Controllers in setup/config transition,
 * user-channel controllers and raw-only devices are excluded.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the allocation. Devices that the
	 * second pass filters out (SETUP/CONFIG/USER_CHANNEL/raw-only)
	 * are still counted here, so the buffer may be slightly larger
	 * than needed.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: still holding hci_dev_list_lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes that really qualify. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the final count. */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
487
/* Handler for MGMT_OP_READ_UNCONF_INDEX_LIST: like read_index_list()
 * but reporting only controllers still flagged HCI_UNCONFIGURED.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the allocation; the second pass
	 * may skip some of the devices counted here.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: still holding hci_dev_list_lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the qualifying unconfigured indexes. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the final count. */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
545
/* Handler for MGMT_OP_READ_EXT_INDEX_LIST: reply with one entry per
 * controller (configured or not), each carrying a type byte and the
 * bus. Also switches the requesting socket over to extended index
 * events.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* Upper bound: every registered device; the fill loop below may
	 * skip some (SETUP/CONFIG/USER_CHANNEL/raw-only).
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	/* GFP_ATOMIC: still holding hci_dev_list_lock. */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		/* Entry type: 0x01 = unconfigured, 0x00 = configured. */
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
611
is_configured(struct hci_dev * hdev)612 static bool is_configured(struct hci_dev *hdev)
613 {
614 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
615 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
616 return false;
617
618 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
619 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
620 !bacmp(&hdev->public_addr, BDADDR_ANY))
621 return false;
622
623 return true;
624 }
625
get_missing_options(struct hci_dev * hdev)626 static __le32 get_missing_options(struct hci_dev *hdev)
627 {
628 u32 options = 0;
629
630 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
631 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
632 options |= MGMT_OPTION_EXTERNAL_CONFIG;
633
634 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
635 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
636 !bacmp(&hdev->public_addr, BDADDR_ANY))
637 options |= MGMT_OPTION_PUBLIC_ADDRESS;
638
639 return cpu_to_le32(options);
640 }
641
new_options(struct hci_dev * hdev,struct sock * skip)642 static int new_options(struct hci_dev *hdev, struct sock *skip)
643 {
644 __le32 options = get_missing_options(hdev);
645
646 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
647 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
648 }
649
/* Complete @opcode towards @sk with the current missing-options
 * bitmask as the reply payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 missing = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &missing,
				 sizeof(missing));
}
657
/* Handler for MGMT_OP_READ_CONFIG_INFO: reply with the manufacturer id
 * plus the supported and currently missing configuration options.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 supported = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Zero the whole reply (including padding) before filling it. */
	memset(&rp, 0, sizeof(rp));

	hci_dev_lock(hdev);

	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		supported |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* A public address can only be configured if the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		supported |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(supported);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
685
/* Build the MGMT_PHY_* bitmask of PHYs/packet types the controller
 * supports, derived from its LMP and LE feature bits.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	/* BR/EDR: basic rate 1-slot is always available; longer slots
	 * and EDR rates depend on LMP feature bits.
	 */
	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* EDR 2M, with 3/5-slot variants gated separately. */
		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			/* EDR 3M requires EDR 2M capability as well. */
			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	/* LE: 1M TX/RX is mandatory; 2M and Coded depend on the LE
	 * feature bits.
	 */
	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
737
/* Build the MGMT_PHY_* bitmask of PHYs/packet types currently selected
 * on the controller, from hdev->pkt_type (BR/EDR) and the LE default
 * PHY masks.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		/* Basic-rate DM/DH bits in pkt_type are positive
		 * selections...
		 */
		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* ...whereas the EDR 2DHx/3DHx bits are tested negated
		 * here: a set bit means that packet type is excluded.
		 * NOTE(review): matches the "shall not use" semantics
		 * of these bits in the HCI packet-type field — confirm
		 * against the Core spec.
		 */
		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	/* LE: the default TX/RX PHY masks select directly. */
	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
800
get_configurable_phys(struct hci_dev * hdev)801 static u32 get_configurable_phys(struct hci_dev *hdev)
802 {
803 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
804 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
805 }
806
get_supported_settings(struct hci_dev * hdev)807 static u32 get_supported_settings(struct hci_dev *hdev)
808 {
809 u32 settings = 0;
810
811 settings |= MGMT_SETTING_POWERED;
812 settings |= MGMT_SETTING_BONDABLE;
813 settings |= MGMT_SETTING_DEBUG_KEYS;
814 settings |= MGMT_SETTING_CONNECTABLE;
815 settings |= MGMT_SETTING_DISCOVERABLE;
816
817 if (lmp_bredr_capable(hdev)) {
818 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
819 settings |= MGMT_SETTING_FAST_CONNECTABLE;
820 settings |= MGMT_SETTING_BREDR;
821 settings |= MGMT_SETTING_LINK_SECURITY;
822
823 if (lmp_ssp_capable(hdev)) {
824 settings |= MGMT_SETTING_SSP;
825 }
826
827 if (lmp_sc_capable(hdev))
828 settings |= MGMT_SETTING_SECURE_CONN;
829
830 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
831 &hdev->quirks))
832 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
833 }
834
835 if (lmp_le_capable(hdev)) {
836 settings |= MGMT_SETTING_LE;
837 settings |= MGMT_SETTING_SECURE_CONN;
838 settings |= MGMT_SETTING_PRIVACY;
839 settings |= MGMT_SETTING_STATIC_ADDRESS;
840 settings |= MGMT_SETTING_ADVERTISING;
841 }
842
843 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
844 hdev->set_bdaddr)
845 settings |= MGMT_SETTING_CONFIGURATION;
846
847 if (cis_central_capable(hdev))
848 settings |= MGMT_SETTING_CIS_CENTRAL;
849
850 if (cis_peripheral_capable(hdev))
851 settings |= MGMT_SETTING_CIS_PERIPHERAL;
852
853 settings |= MGMT_SETTING_PHY_CONFIGURATION;
854
855 return settings;
856 }
857
/* Build the MGMT_SETTING_* bitmask reflecting the controller's current
 * state, derived from its HCI dev flags and capabilities.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	/* ISO-related settings are reported from capabilities, not
	 * dev flags.
	 */
	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	return settings;
}
937
/* Look up a pending mgmt command with @opcode for @hdev on the control
 * channel, or NULL if there is none.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
942
mgmt_get_adv_discov_flags(struct hci_dev * hdev)943 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
944 {
945 struct mgmt_pending_cmd *cmd;
946
947 /* If there's a pending mgmt command the flags will not yet have
948 * their final values, so check for this first.
949 */
950 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
951 if (cmd) {
952 struct mgmt_mode *cp = cmd->param;
953 if (cp->val == 0x01)
954 return LE_AD_GENERAL;
955 else if (cp->val == 0x02)
956 return LE_AD_LIMITED;
957 } else {
958 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
959 return LE_AD_LIMITED;
960 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
961 return LE_AD_GENERAL;
962 }
963
964 return 0;
965 }
966
mgmt_get_connectable(struct hci_dev * hdev)967 bool mgmt_get_connectable(struct hci_dev *hdev)
968 {
969 struct mgmt_pending_cmd *cmd;
970
971 /* If there's a pending mgmt command the flag will not yet have
972 * it's final value, so check for this first.
973 */
974 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
975 if (cmd) {
976 struct mgmt_mode *cp = cmd->param;
977
978 return cp->val;
979 }
980
981 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
982 }
983
/* hci_sync callback: refresh the EIR data and the class of device on
 * the controller; always reports success.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
991
service_cache_off(struct work_struct * work)992 static void service_cache_off(struct work_struct *work)
993 {
994 struct hci_dev *hdev = container_of(work, struct hci_dev,
995 service_cache.work);
996
997 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
998 return;
999
1000 hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
1001 }
1002
rpa_expired_sync(struct hci_dev * hdev,void * data)1003 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1004 {
1005 /* The generation of a new RPA and programming it into the
1006 * controller happens in the hci_req_enable_advertising()
1007 * function.
1008 */
1009 if (ext_adv_capable(hdev))
1010 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1011 else
1012 return hci_enable_advertising_sync(hdev);
1013 }
1014
/* Delayed work fired when the current RPA's lifetime ends: mark it
 * expired and, if advertising is active, queue a re-enable (which
 * regenerates the RPA — see rpa_expired_sync()).
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	/* Set the flag unconditionally so the next advertising start
	 * picks up a fresh RPA even if nothing is queued here.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1029
1030 static int set_discoverable_sync(struct hci_dev *hdev, void *data);
1031
/* Delayed work: discoverable timeout fired. Clears the discoverable
 * flags under hdev->lock, schedules the HCI-level update and notifies
 * mgmt listeners of the new settings.
 */
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}
1056
1057 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1058
/* Finish one mesh transmit: optionally emit the packet-complete
 * event (suppressed when @silent) and release the tracking entry.
 */
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;	/* local copy used as event payload */

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}
1070
/* cmd_sync callback run when the mesh send window ends: clear the
 * sending state, stop advertising and complete the next pending mesh
 * TX entry, if any. Always returns 0.
 */
static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	/* NULL presumably selects the first pending entry - TODO
	 * confirm against mgmt_mesh_next() in mgmt_util.
	 */
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}
1084
1085 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1086 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
/* cmd_sync completion handler: kick off the next queued mesh
 * transmit, if any. The incoming @err is ignored and reused to hold
 * the result of queueing the next send.
 */
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	/* On queueing failure complete (and free) the entry with the
	 * packet-complete event; otherwise mark a send in flight.
	 */
	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}
1102
/* Delayed work: end of the mesh send period. Only schedules the
 * teardown when a send is actually in flight.
 */
static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}
1113
/* One-time per-device mgmt setup, performed on first mgmt access.
 * Guarded by the HCI_MGMT flag so repeated calls are no-ops.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	/* Delayed-work handlers used by the mgmt state machine */
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}
1135
/* MGMT_OP_READ_INFO handler: snapshot the controller's address,
 * version, settings, class and names under hdev->lock and reply with
 * status 0.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	/* Settings bitmasks are little-endian on the wire */
	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1165
/* Append class of device, appearance and the device names to @eir as
 * EIR structures, depending on which transports are enabled. Returns
 * the number of bytes written. The caller must provide a buffer large
 * enough for all fields (the names are bounded by hdev's fixed
 * dev_name/short_name storage).
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	/* Class of device is only appended when BR/EDR is enabled */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	/* Appearance is only appended when LE is enabled */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1189
/* MGMT_OP_READ_EXT_INFO handler: like READ_INFO but with class,
 * appearance and names packed as EIR data after the fixed header.
 * Calling this also switches the socket over to extended info events.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512]; /* fixed reply header plus variable EIR data */
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1229
/* Broadcast MGMT_EV_EXT_INFO_CHANGED with a freshly built EIR blob to
 * all sockets that opted in to extended info events, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512]; /* fixed event header plus variable EIR data */
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1245
/* Complete @opcode on @sk with the current settings bitmask as the
 * (little-endian) response payload.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings;

	settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1253
/* Emit MGMT_EV_ADVERTISING_ADDED for @instance. @sk is the socket to
 * skip - presumably the one that originated the change.
 */
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev = {
		.instance = instance,
	};

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}
1262
/* Emit MGMT_EV_ADVERTISING_REMOVED for @instance. @sk is the socket
 * to skip - presumably the one that originated the change.
 */
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev = {
		.instance = instance,
	};

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}
1272
cancel_adv_timeout(struct hci_dev * hdev)1273 static void cancel_adv_timeout(struct hci_dev *hdev)
1274 {
1275 if (hdev->adv_instance_timeout) {
1276 hdev->adv_instance_timeout = 0;
1277 cancel_delayed_work(&hdev->adv_instance_expire);
1278 }
1279 }
1280
/* Re-populate the pending LE connection/report lists from the stored
 * connection parameters, according to each entry's auto-connect
 * policy. Used on power-on.
 *
 * This function requires the caller holds hdev->lock
 */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			/* Other policies stay off the pending lists */
			break;
		}
	}
}
1305
new_settings(struct hci_dev * hdev,struct sock * skip)1306 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1307 {
1308 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1309
1310 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1311 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1312 }
1313
/* Completion handler for MGMT_OP_SET_POWERED: reply to the pending
 * command and, on a successful power-on, restore LE actions and
 * broadcast the new settings.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			/* Power-on: rebuild the pending LE lists and
			 * restart passive scanning under hdev->lock.
			 */
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
1349
/* cmd_sync callback for MGMT_OP_SET_POWERED: apply the requested
 * power state synchronously.
 */
static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}
1359
/* MGMT_OP_SET_POWERED handler: validate the request, reject it while
 * a power change is already pending or in progress, short-circuit
 * when the state already matches, otherwise queue set_powered_sync
 * with mgmt_set_powered_complete.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A power-off request while a power-down is already in
	 * progress is reported busy.
	 */
	if (!cp->val) {
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
					      MGMT_STATUS_BUSY);
			goto failed;
		}
	}

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just echo the settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1418
/* Broadcast the current settings to all subscribed mgmt sockets. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1423
/* Context threaded through mgmt_pending_foreach() callbacks */
struct cmd_lookup {
	struct sock *sk;	/* first matched command's socket, if any */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1429
/* mgmt_pending_foreach() callback: reply to each pending command with
 * the current settings, then free it. The first command's socket is
 * stashed in the cmd_lookup with an extra reference so the caller can
 * skip it when broadcasting; the caller must drop it with sock_put().
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	/* Unlink manually; the entry is then freed without touching
	 * the pending list again.
	 */
	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1445
cmd_status_rsp(struct mgmt_pending_cmd * cmd,void * data)1446 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1447 {
1448 u8 *status = data;
1449
1450 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1451 mgmt_pending_remove(cmd);
1452 }
1453
cmd_complete_rsp(struct mgmt_pending_cmd * cmd,void * data)1454 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1455 {
1456 if (cmd->cmd_complete) {
1457 u8 *status = data;
1458
1459 cmd->cmd_complete(cmd, *status);
1460 mgmt_pending_remove(cmd);
1461
1462 return;
1463 }
1464
1465 cmd_status_rsp(cmd, data);
1466 }
1467
/* Complete a pending command echoing back its entire parameter blob */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1473
/* Complete a pending command echoing back only the leading
 * mgmt_addr_info portion of its parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1479
mgmt_bredr_support(struct hci_dev * hdev)1480 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1481 {
1482 if (!lmp_bredr_capable(hdev))
1483 return MGMT_STATUS_NOT_SUPPORTED;
1484 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1485 return MGMT_STATUS_REJECTED;
1486 else
1487 return MGMT_STATUS_SUCCESS;
1488 }
1489
mgmt_le_support(struct hci_dev * hdev)1490 static u8 mgmt_le_support(struct hci_dev *hdev)
1491 {
1492 if (!lmp_le_capable(hdev))
1493 return MGMT_STATUS_NOT_SUPPORTED;
1494 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1495 return MGMT_STATUS_REJECTED;
1496 else
1497 return MGMT_STATUS_SUCCESS;
1498 }
1499
/* Completion handler for MGMT_OP_SET_DISCOVERABLE: on success arm the
 * discoverable timeout (stored earlier by set_discoverable), reply
 * and broadcast the settings; on failure report the error and clear
 * the limited flag.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Arm the discoverable timeout now that the new state has
	 * taken effect.
	 */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
1533
/* cmd_sync callback for MGMT_OP_SET_DISCOVERABLE (also queued from
 * discov_off): push the current discoverable state to the controller.
 */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
1540
/* MGMT_OP_SET_DISCOVERABLE handler. val is 0x00 (off), 0x01 (general)
 * or 0x02 (limited); a timeout is required for limited mode and
 * forbidden when disabling. Requires connectable to be enabled.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable is only allowed while connectable */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Powered off: only the flag changes, no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1673
/* Completion handler for MGMT_OP_SET_CONNECTABLE: reply to the
 * pending command and broadcast the new settings on success.
 */
static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}
1701
/* Apply a SET_CONNECTABLE request while no HCI traffic is needed
 * (powered-off path): update the flags, reply with the settings and,
 * if anything changed, refresh scanning and broadcast new settings.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = !!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE);
	int err;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Dropping connectable also drops discoverable */
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (!changed)
		return 0;

	hci_update_scan(hdev);
	hci_update_passive_scan(hdev);

	return new_settings(hdev, sk);
}
1730
/* cmd_sync callback for MGMT_OP_SET_CONNECTABLE: push the current
 * connectable state to the controller.
 */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
1737
/* MGMT_OP_SET_CONNECTABLE handler: validate, handle the powered-off
 * case as a pure flag change, otherwise queue set_connectable_sync
 * with mgmt_set_connectable_complete.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only the flags change */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Disabling connectable also tears down discoverable
		 * state, including any armed discoverable timeout.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1797
/* MGMT_OP_SET_BONDABLE handler: toggles the HCI_BONDABLE flag. This
 * is host-side state only, so no HCI command is issued.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* test-and-set/clear reports whether the flag actually flipped */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1835
/* MGMT_OP_SET_LINK_SECURITY handler. BR/EDR only. When powered off
 * only the flag is toggled; when powered the legacy
 * HCI_OP_WRITE_AUTH_ENABLE command is sent directly and the command
 * is left pending.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag change only, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: just reply */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* On success cmd stays pending; it is presumably completed
	 * from the HCI event path when the controller responds.
	 */
	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1904
/* Completion handler for MGMT_OP_SET_SSP: reconcile HCI_SSP_ENABLED
 * with the outcome and answer every pending SET_SSP command.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* A failed enable may have left the flag set (see
		 * set_ssp_sync); clear it and broadcast if so.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	/* test-and-set/clear reports whether the flag actually flipped */
	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the first command's socket */
	if (match.sk)
		sock_put(match.sk);

	/* Refresh EIR after the SSP change */
	hci_update_eir_sync(hdev);
}
1946
/* cmd_sync callback for MGMT_OP_SET_SSP. The HCI_SSP_ENABLED flag is
 * set up front for an enable request, then restored on success so
 * that set_ssp_complete can itself observe the flag transition (its
 * test_and_set computes 'changed'). On failure the flag is left set
 * and the error path in set_ssp_complete clears it.
 * NOTE(review): this set/restore protocol is inferred from
 * set_ssp_complete in this file - confirm before relying on it.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1964
/* MGMT_OP_SET_SSP handler: validate BR/EDR and SSP support, handle
 * the powered-off case as a pure flag change, otherwise queue
 * set_ssp_sync with set_ssp_complete.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag change only, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just echo the settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2039
/* MGMT_OP_SET_HS handler: High Speed is always rejected as not
 * supported.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	bt_dev_dbg(hdev, "sock %p", sk);

	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
			       MGMT_STATUS_NOT_SUPPORTED);
}
2047
/* Completion handler for MGMT_OP_SET_LE: respond to all pending
 * SET_LE commands with either the error status or the new settings,
 * broadcasting the latter.
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the first command's socket */
	if (match.sk)
		sock_put(match.sk);
}
2068
/* cmd_sync callback for MGMT_OP_SET_LE: tear down advertising when
 * disabling (or set the enabled flag when enabling), write the LE
 * host support setting and refresh advertising data and passive
 * scanning where appropriate.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		/* Disabling LE: drop all advertising instances and stop
		 * any active advertising first.
		 */
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2112
/* Completion callback for set_mesh_sync(): on success reply to the
 * originating socket; on error fail every pending SET_MESH_RECEIVER
 * command instead.
 */
static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	u8 status = mgmt_status(err);
	struct sock *sk = cmd->sk;

	if (status) {
		/* NOTE(review): presumably cmd_status_rsp removes each
		 * pending command it visits (including this one), so no
		 * explicit mgmt_pending_remove() is needed here — confirm
		 * against mgmt_util.
		 */
		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	mgmt_pending_remove(cmd);
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
}
2128
/* hci_cmd_sync work for MGMT_OP_SET_MESH_RECEIVER: toggle the HCI_MESH
 * flag, install the caller-supplied AD-type filter list and refresh
 * passive scanning. Always reports success.
 */
static int set_mesh_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_mesh *cp = cmd->param;
	size_t len = cmd->param_len;

	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));

	if (cp->enable)
		hci_dev_set_flag(hdev, HCI_MESH);
	else
		hci_dev_clear_flag(hdev, HCI_MESH);

	/* Bytes after the fixed header are the AD-type filter list.
	 * NOTE(review): assumes param_len >= sizeof(*cp) (a shorter
	 * length would underflow this size_t) — the MGMT framework's
	 * minimum-size check should guarantee it; verify.
	 */
	len -= sizeof(*cp);

	/* If filters don't fit, forward all adv pkts */
	if (len <= sizeof(hdev->mesh_ad_types))
		memcpy(hdev->mesh_ad_types, cp->ad_types, len);

	hci_update_passive_scan_sync(hdev);
	return 0;
}
2151
/* MGMT_OP_SET_MESH_RECEIVER handler: validate the request and hand the
 * actual reconfiguration off to set_mesh_sync().
 */
static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Mesh requires an LE-capable controller with the experimental
	 * mesh feature enabled.
	 */
	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* enable is strictly boolean */
	if (cp->enable != 0x00 && cp->enable != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (cmd)
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);
	else
		err = -ENOMEM;

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2189
/* Completion callback for mesh_send_sync(): on failure tear the
 * transmission down; on success schedule the "send done" work after the
 * requested number of transmissions has had time to go out.
 */
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	unsigned long mesh_send_interval;
	u8 mgmt_err = mgmt_status(err);

	/* Report any errors here, but don't report completion */

	if (mgmt_err) {
		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
		/* Send Complete Error Code for handle */
		mesh_send_complete(hdev, mesh_tx, false);
		return;
	}

	/* Allow 25 ms per requested transmission before declaring the
	 * send finished.
	 */
	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
			   mesh_send_interval);
}
2210
/* hci_cmd_sync work for MGMT_OP_MESH_SEND: register a short-lived
 * advertising instance carrying the mesh payload and schedule it.
 * NOTE(review): returns either a negative errno or the positive
 * MGMT_STATUS_BUSY value — unusual for a sync callback; confirm the
 * caller maps both correctly.
 */
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	/* Use an instance number past the controller's advertised range so
	 * mesh traffic does not collide with regular adv instances.
	 */
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	timeout = 1000;
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other, or
		 * let it go naturally from queue if ADV is already happening
		 */
		instance = 0;
	}

	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}
2264
send_count(struct mgmt_mesh_tx * mesh_tx,void * data)2265 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2266 {
2267 struct mgmt_rp_mesh_read_features *rp = data;
2268
2269 if (rp->used_handles >= rp->max_handles)
2270 return;
2271
2272 rp->handles[rp->used_handles++] = mesh_tx->handle;
2273 }
2274
/* MGMT_OP_MESH_READ_FEATURES handler: report the maximum number of mesh
 * transmit handles and the handles currently in use by this socket.
 */
static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	memset(&rp, 0, sizeof(rp));
	rp.index = cpu_to_le16(hdev->id);
	/* Handles are only offered while LE is enabled */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	/* Collect the handles owned by the requesting socket */
	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	/* Trim the reply: only used_handles entries of the
	 * MESH_HANDLES_MAX-sized handle array are sent back.
	 */
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}
2301
send_cancel(struct hci_dev * hdev,void * data)2302 static int send_cancel(struct hci_dev *hdev, void *data)
2303 {
2304 struct mgmt_pending_cmd *cmd = data;
2305 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2306 struct mgmt_mesh_tx *mesh_tx;
2307
2308 if (!cancel->handle) {
2309 do {
2310 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2311
2312 if (mesh_tx)
2313 mesh_send_complete(hdev, mesh_tx, false);
2314 } while (mesh_tx);
2315 } else {
2316 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2317
2318 if (mesh_tx && mesh_tx->sk == cmd->sk)
2319 mesh_send_complete(hdev, mesh_tx, false);
2320 }
2321
2322 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2323 0, NULL, 0);
2324 mgmt_pending_free(cmd);
2325
2326 return 0;
2327 }
2328
/* MGMT_OP_MESH_SEND_CANCEL handler: validate capability/state and queue
 * the actual cancellation work (send_cancel) to the cmd_sync machinery.
 */
static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	/* Mesh requires an LE-capable controller with the experimental
	 * mesh feature enabled.
	 */
	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
	if (cmd)
		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
	else
		err = -ENOMEM;

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2362
/* MGMT_OP_MESH_SEND handler: queue an outgoing mesh packet. The payload
 * is carried as advertising data; on success the reply contains the
 * assigned one-byte transmit handle.
 */
static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct mgmt_cp_mesh_send *send = data;
	struct mgmt_rp_mesh_read_features rp;
	bool sending;
	int err = 0;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_NOT_SUPPORTED);
	/* LE must be enabled and the payload non-empty and at most 31
	 * bytes beyond the fixed header.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    len <= MGMT_MESH_SEND_SIZE ||
	    len > (MGMT_MESH_SEND_SIZE + 31))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Reuse the READ_FEATURES reply structure to count this socket's
	 * outstanding transmit handles.
	 */
	memset(&rp, 0, sizeof(rp));
	rp.max_handles = MESH_HANDLES_MAX;

	mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	if (rp.max_handles <= rp.used_handles) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_BUSY);
		goto done;
	}

	/* If a send is already in flight, only enqueue the entry; it is
	 * picked up later rather than queued to cmd_sync now.
	 */
	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);

	if (!mesh_tx)
		err = -ENOMEM;
	else if (!sending)
		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
					 mesh_send_start_complete);

	if (err < 0) {
		bt_dev_err(hdev, "Send Mesh Failed %d", err);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_FAILED);

		if (mesh_tx) {
			/* NOTE(review): err < 0 can only happen on the
			 * !sending path, so this "if (sending)" removal
			 * looks unreachable and the failed entry stays on
			 * the list — confirm whether this should be
			 * !sending against the mgmt_mesh_tx lifecycle.
			 */
			if (sending)
				mgmt_mesh_remove(mesh_tx);
		}
	} else {
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);

		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
				  &mesh_tx->handle, 1);
	}

done:
	hci_dev_unlock(hdev);
	return err;
}
2423
/* MGMT_OP_SET_LE handler: enable or disable LE support. The no-op and
 * rejected cases are answered immediately; otherwise the work is queued
 * to set_le_sync() with completion in set_le_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* If the adapter is down, or the requested state already matches
	 * the host's LE capability, only the flags need updating — no
	 * controller commands required.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Disabling LE also clears the advertising setting */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize against other in-flight LE/advertising changes */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2512
2513 /* This is a helper function to test for pending mgmt commands that can
2514 * cause CoD or EIR HCI commands. We can only allow one such pending
2515 * mgmt command at a time since otherwise we cannot easily track what
2516 * the current values are, will be, and based on that calculate if a new
2517 * HCI command needs to be sent and if yes with what value.
2518 */
pending_eir_or_class(struct hci_dev * hdev)2519 static bool pending_eir_or_class(struct hci_dev *hdev)
2520 {
2521 struct mgmt_pending_cmd *cmd;
2522
2523 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2524 switch (cmd->opcode) {
2525 case MGMT_OP_ADD_UUID:
2526 case MGMT_OP_REMOVE_UUID:
2527 case MGMT_OP_SET_DEV_CLASS:
2528 case MGMT_OP_SET_POWERED:
2529 return true;
2530 }
2531 }
2532
2533 return false;
2534 }
2535
2536 static const u8 bluetooth_base_uuid[] = {
2537 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2538 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2539 };
2540
get_uuid_size(const u8 * uuid)2541 static u8 get_uuid_size(const u8 *uuid)
2542 {
2543 u32 val;
2544
2545 if (memcmp(uuid, bluetooth_base_uuid, 12))
2546 return 128;
2547
2548 val = get_unaligned_le32(&uuid[12]);
2549 if (val > 0xffff)
2550 return 32;
2551
2552 return 16;
2553 }
2554
mgmt_class_complete(struct hci_dev * hdev,void * data,int err)2555 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2556 {
2557 struct mgmt_pending_cmd *cmd = data;
2558
2559 bt_dev_dbg(hdev, "err %d", err);
2560
2561 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2562 mgmt_status(err), hdev->dev_class, 3);
2563
2564 mgmt_pending_free(cmd);
2565 }
2566
/* hci_cmd_sync work for MGMT_OP_ADD_UUID: refresh Class of Device,
 * then EIR data; the first failure wins.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2577
/* MGMT_OP_ADD_UUID handler: record a new service UUID and kick off the
 * matching Class of Device and EIR updates.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one class/EIR-changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* MGMT_OP_ADD_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

	/* On success (err == 0) execution simply falls through to the
	 * unlock path below.
	 */
failed:
	hci_dev_unlock(hdev);
	return err;
}
2627
enable_service_cache(struct hci_dev * hdev)2628 static bool enable_service_cache(struct hci_dev *hdev)
2629 {
2630 if (!hdev_is_powered(hdev))
2631 return false;
2632
2633 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2634 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2635 CACHE_TIMEOUT);
2636 return true;
2637 }
2638
2639 return false;
2640 }
2641
/* hci_cmd_sync work for MGMT_OP_REMOVE_UUID: refresh Class of Device,
 * then EIR data; the first failure wins.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2652
/* MGMT_OP_REMOVE_UUID handler: delete one service UUID — or all of
 * them, when the all-zero wildcard UUID is given — and refresh the
 * Class of Device and EIR data.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a wildcard clearing the whole list */
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one class/EIR-changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* When the service-cache timer gets armed, the actual
		 * class/EIR update happens on cache flush, so reply with
		 * the current class right away.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* Remove every entry matching the given UUID */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_REMOVE_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2723
set_class_sync(struct hci_dev * hdev,void * data)2724 static int set_class_sync(struct hci_dev *hdev, void *data)
2725 {
2726 int err = 0;
2727
2728 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2729 cancel_delayed_work_sync(&hdev->service_cache);
2730 err = hci_update_eir_sync(hdev);
2731 }
2732
2733 if (err)
2734 return err;
2735
2736 return hci_update_class_sync(hdev);
2737 }
2738
/* MGMT_OP_SET_DEV_CLASS handler: update the major/minor device class.
 * The controller is only touched while powered; the completion replies
 * with the effective 3-byte class value.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one class/EIR-changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The low two bits of minor and the high three bits of major
	 * must be zero; requests setting them are rejected.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: remember the values and reply immediately */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_SET_DEV_CLASS don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2793
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the adapter's entire BR/EDR
 * link-key store with the supplied list, skipping blocked, malformed
 * and debug keys.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound keeping the total payload size within U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	hci_dev_lock(hdev);

	/* This is a full replacement, not a merge */
	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Keys on the block list are dropped with a warning */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Link keys only exist for BR/EDR addresses */
		if (key->addr.type != BDADDR_BREDR) {
			bt_dev_warn(hdev,
				    "Invalid link address type %u for %pMR",
				    key->addr.type, &key->addr.bdaddr);
			continue;
		}

		if (key->type > 0x08) {
			bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
				    key->type, &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2886
/* Emit a Device Unpaired event for the given address, skipping the
 * socket that initiated the unpairing.
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	ev.addr.type = addr_type;
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
2898
unpair_device_complete(struct hci_dev * hdev,void * data,int err)2899 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2900 {
2901 struct mgmt_pending_cmd *cmd = data;
2902 struct mgmt_cp_unpair_device *cp = cmd->param;
2903
2904 if (!err)
2905 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2906
2907 cmd->cmd_complete(cmd, err);
2908 mgmt_pending_free(cmd);
2909 }
2910
unpair_device_sync(struct hci_dev * hdev,void * data)2911 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2912 {
2913 struct mgmt_pending_cmd *cmd = data;
2914 struct mgmt_cp_unpair_device *cp = cmd->param;
2915 struct hci_conn *conn;
2916
2917 if (cp->addr.type == BDADDR_BREDR)
2918 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2919 &cp->addr.bdaddr);
2920 else
2921 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2922 le_addr_type(cp->addr.type));
2923
2924 if (!conn)
2925 return 0;
2926
2927 /* Disregard any possible error since the likes of hci_abort_conn_sync
2928 * will clean up the connection no matter the error.
2929 */
2930 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2931
2932 return 0;
2933 }
2934
/* MGMT_OP_UNPAIR_DEVICE handler: remove all pairing data (link keys for
 * BR/EDR; SMP LTK/IRK for LE) and optionally disconnect the device.
 * When a disconnect is needed it is queued to unpair_device_sync();
 * otherwise the reply and Device Unpaired event are sent immediately.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* Echo the target address back in the reply */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		/* Failure to remove the link key means the device was
		 * never paired in the first place.
		 */
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: drop the stored parameters right away */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3063
disconnect_complete(struct hci_dev * hdev,void * data,int err)3064 static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
3065 {
3066 struct mgmt_pending_cmd *cmd = data;
3067
3068 cmd->cmd_complete(cmd, mgmt_status(err));
3069 mgmt_pending_free(cmd);
3070 }
3071
disconnect_sync(struct hci_dev * hdev,void * data)3072 static int disconnect_sync(struct hci_dev *hdev, void *data)
3073 {
3074 struct mgmt_pending_cmd *cmd = data;
3075 struct mgmt_cp_disconnect *cp = cmd->param;
3076 struct hci_conn *conn;
3077
3078 if (cp->addr.type == BDADDR_BREDR)
3079 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3080 &cp->addr.bdaddr);
3081 else
3082 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3083 le_addr_type(cp->addr.type));
3084
3085 if (!conn)
3086 return -ENOTCONN;
3087
3088 /* Disregard any possible error since the likes of hci_abort_conn_sync
3089 * will clean up the connection no matter the error.
3090 */
3091 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3092
3093 return 0;
3094 }
3095
/* MGMT_OP_DISCONNECT handler: request termination of an existing BR/EDR
 * or LE connection; the reply is sent from disconnect_complete().
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Echo the target address back in the reply */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
				 disconnect_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3141
/* Map an HCI link type plus address type to the MGMT BDADDR_* address
 * type used in management replies and events.
 */
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	if (link_type == ISO_LINK || link_type == LE_LINK) {
		if (addr_type == ADDR_LE_DEV_PUBLIC)
			return BDADDR_LE_PUBLIC;

		/* Fallback to LE Random address type */
		return BDADDR_LE_RANDOM;
	}

	/* Fallback to BR/EDR type */
	return BDADDR_BREDR;
}
3161
/* MGMT_OP_GET_CONNECTIONS handler: report the addresses of all
 * mgmt-visible connections. SCO/eSCO links are filtered from the reply.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: upper bound for the reply allocation */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in the addresses. A SCO/eSCO entry is written
	 * but not counted, so the next accepted entry overwrites it.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3215
/* Queue an HCI PIN Code Negative Reply towards the controller and track it
 * as a pending mgmt command so it can be completed when the HCI response
 * arrives (via addr_cmd_complete).
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	/* The HCI command parameter only carries the peer bdaddr */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
3236
/* Handle MGMT_OP_PIN_CODE_REPLY: forward a user supplied PIN code for an
 * existing BR/EDR (ACL) connection to the controller.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN codes only exist for BR/EDR, so look up the ACL link */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN. Reject a shorter one
	 * by sending a negative reply to the controller and reporting
	 * INVALID_PARAMS back to the caller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	/* Completion is reported when the HCI response for this command
	 * arrives; on send failure drop the pending entry right away.
	 */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3298
/* Handle MGMT_OP_SET_IO_CAPABILITY: store the IO capability used for
 * pairing. Always succeeds for any value within the defined range.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *param = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Reject anything beyond the highest defined IO capability */
	if (param->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);
	hdev->io_capability = param->io_capability;
	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
3321
find_pairing(struct hci_conn * conn)3322 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3323 {
3324 struct hci_dev *hdev = conn->hdev;
3325 struct mgmt_pending_cmd *cmd;
3326
3327 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3328 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3329 continue;
3330
3331 if (cmd->user_data != conn)
3332 continue;
3333
3334 return cmd;
3335 }
3336
3337 return NULL;
3338 }
3339
/* Complete a pending PAIR_DEVICE command with the given mgmt status and
 * release the references and callbacks set up when pairing was started.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	/* Drop the hold taken when the connection was initiated */
	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Matches the hci_conn_get() stored in cmd->user_data */
	hci_conn_put(conn);

	return err;
}
3368
/* Called by the SMP layer when pairing finished; completes any pending
 * PAIR_DEVICE command for the connection with SUCCESS or FAILED.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, complete ? MGMT_STATUS_SUCCESS :
					  MGMT_STATUS_FAILED);
	mgmt_pending_remove(cmd);
}
3380
/* Connection callback for BR/EDR pairing: completes the pending
 * PAIR_DEVICE command with the (translated) HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *pending;

	BT_DBG("status %u", status);

	pending = find_pairing(conn);
	if (!pending) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	pending->cmd_complete(pending, mgmt_status(status));
	mgmt_pending_remove(pending);
}
3396
/* Connection callback for LE pairing. A zero status is ignored here
 * because for LE a successful connection alone does not finish pairing;
 * success is reported through mgmt_smp_complete() instead. Only failures
 * terminate the pending PAIR_DEVICE command in this path.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *pending;

	BT_DBG("status %u", status);

	if (!status)
		return;

	pending = find_pairing(conn);
	if (!pending) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	pending->cmd_complete(pending, mgmt_status(status));
	mgmt_pending_remove(pending);
}
3415
/* Handle MGMT_OP_PAIR_DEVICE: initiate a connection to the given address
 * (BR/EDR or LE) and drive pairing via the connection's confirm callbacks.
 * The command completes asynchronously through pairing_complete(), except
 * when the link is already connected and secure.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Prepare the reply up front; every response echoes the address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE,
				       HCI_ACL_CONN_TIMEOUT);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the peripheral preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			err = -EIO;
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	/* Map connection-creation errors to mgmt status codes */
	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* A non-NULL connect_cfm_cb means someone else is already using
	 * the confirm callbacks on this connection (e.g. another pairing).
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Reference released by pairing_complete() */
	cmd->user_data = hci_conn_get(conn);

	/* If the link is already up and secure, complete immediately */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3551
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: abort an in-progress PAIR_DEVICE
 * command for the given address, remove any partially created keys and
 * tear down the link if it only existed for the pairing attempt.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* There must be an outstanding PAIR_DEVICE to cancel */
	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The cancel request must target the same peer address */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Completes the PAIR_DEVICE command and drops its conn references */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3608
/* Common handler for the user pairing response commands (confirm reply,
 * passkey reply and their negative variants, plus PIN code negative
 * reply). LE responses are routed through SMP and complete immediately;
 * BR/EDR responses are forwarded to the controller as the given HCI
 * opcode and complete when the HCI response arrives.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	/* Look up the connection by transport indicated in the address type */
	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses are handled entirely by SMP; reply now */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3679
/* Handle MGMT_OP_PIN_CODE_NEG_REPLY: reject a PIN code request by routing
 * it through the common user pairing response path (no passkey needed).
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3691
/* Handle MGMT_OP_USER_CONFIRM_REPLY: accept a user confirmation request.
 * The payload must be exactly the command structure; any other length is
 * rejected here rather than relying on generic length checking.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3707
/* Handle MGMT_OP_USER_CONFIRM_NEG_REPLY: reject a user confirmation
 * request via the common user pairing response path.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3719
/* Handle MGMT_OP_USER_PASSKEY_REPLY: supply the user-entered passkey via
 * the common user pairing response path.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3731
/* Handle MGMT_OP_USER_PASSKEY_NEG_REPLY: reject a passkey request via the
 * common user pairing response path.
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3743
/* If the current advertising instance advertises data covered by @flags
 * (e.g. local name or appearance), expire it early and move on to the
 * next instance so the changed data is picked up. Always returns 0.
 */
static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return 0;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return 0;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return 0;

	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);

	return 0;
}
3766
/* hci_cmd_sync callback: expire advertising instances that include the
 * local name after the name has changed.
 */
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
3771
/* Completion callback for SET_LOCAL_NAME: report the result to the caller
 * and, on success while LE advertising, refresh instances carrying the
 * name via name_changed_sync.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Bail out if the command is no longer the pending one (it may
	 * already have been taken care of elsewhere).
	 */
	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3796
/* hci_cmd_sync worker for SET_LOCAL_NAME: push the new name to the
 * controller for each transport that carries it. Always returns 0; the
 * result is reported via set_name_complete.
 */
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	/* BR/EDR carries the name in the controller and in the EIR data */
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
3812
/* Handle MGMT_OP_SET_LOCAL_NAME: update the device name and short name.
 * When powered, the controller update runs asynchronously through
 * set_name_sync/set_name_complete; when powered off, only the stored
 * values are updated and the change is broadcast immediately.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name is host-side only, so it can be stored right away */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		/* Notify other mgmt sockets about the name change */
		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	/* Store the new name now; the controller update is in flight */
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3875
/* hci_cmd_sync callback: expire advertising instances that include the
 * appearance value after it has changed.
 */
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
3880
/* Handle MGMT_OP_SET_APPEARANCE: store the LE appearance value and, if it
 * changed while advertising, refresh any instances that include it.
 */
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Appearance is an LE-only concept */
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != appearance) {
		hdev->appearance = appearance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}
3915
/* Handle MGMT_OP_GET_PHY_CONFIGURATION: return the supported, currently
 * selected and configurable PHYs for the controller.
 */
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_configuration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));

	/* Take a consistent snapshot of the PHY state under the dev lock */
	hci_dev_lock(hdev);
	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}
3936
mgmt_phy_configuration_changed(struct hci_dev * hdev,struct sock * skip)3937 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3938 {
3939 struct mgmt_ev_phy_configuration_changed ev;
3940
3941 memset(&ev, 0, sizeof(ev));
3942
3943 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3944
3945 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3946 sizeof(ev), skip);
3947 }
3948
/* Completion callback for SET_PHY_CONFIGURATION: extract the controller
 * status from the command response skb (stored by set_default_phy_sync)
 * and report the result, broadcasting the change on success.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Bail out if the command is no longer the pending one */
	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	/* err only covers queueing; the HCI status lives in the skb */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
3985
set_default_phy_sync(struct hci_dev * hdev,void * data)3986 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3987 {
3988 struct mgmt_pending_cmd *cmd = data;
3989 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3990 struct hci_cp_le_set_default_phy cp_phy;
3991 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3992
3993 memset(&cp_phy, 0, sizeof(cp_phy));
3994
3995 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3996 cp_phy.all_phys |= 0x01;
3997
3998 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3999 cp_phy.all_phys |= 0x02;
4000
4001 if (selected_phys & MGMT_PHY_LE_1M_TX)
4002 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4003
4004 if (selected_phys & MGMT_PHY_LE_2M_TX)
4005 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4006
4007 if (selected_phys & MGMT_PHY_LE_CODED_TX)
4008 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4009
4010 if (selected_phys & MGMT_PHY_LE_1M_RX)
4011 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4012
4013 if (selected_phys & MGMT_PHY_LE_2M_RX)
4014 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4015
4016 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4017 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4018
4019 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4020 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4021
4022 return 0;
4023 }
4024
/* Handle MGMT_OP_SET_PHY_CONFIGURATION: apply the requested PHY selection.
 * BR/EDR PHYs map onto the ACL packet type mask (applied immediately);
 * LE PHYs are programmed asynchronously via set_default_phy_sync.
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Selecting an unsupported PHY is invalid */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* Non-configurable PHYs must always remain selected */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Nothing to do if the selection matches the current state */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Build the BR/EDR packet type mask. Note the inverted sense of the
	 * bits: the basic rate HCI_DH*/HCI_DM* bits enable packet types,
	 * while the EDR HCI_2DH*/HCI_3DH* bits disable them, so they are
	 * cleared to select and set to deselect.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If the LE selection is unchanged, only the BR/EDR part needed
	 * updating and the command can complete right away.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4153
/* Handle MGMT_OP_SET_BLOCKED_KEYS: replace the device's blocked-key list
 * with the one supplied by userspace.
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	/* Despite the name, this holds a MGMT_STATUS_* code (sent in the
	 * command complete), not a negative errno.
	 */
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	/* Largest key_count whose total payload still fits in a u16 length */
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared key count */
	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* The new list fully replaces the previous one */
	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		/* NOTE(review): on allocation failure the keys added so far
		 * remain on the list while NO_RESOURCES is returned — the
		 * list is left partially populated.
		 */
		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				 err, NULL, 0);
}
4202
/* Handle MGMT_OP_SET_WIDEBAND_SPEECH: toggle the wideband speech flag.
 * Only allowed when the controller quirk advertises support, and the
 * value can only be changed while the controller is powered off.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Changing the setting while powered is rejected; re-asserting the
	 * current value is still fine.
	 */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	/* Only broadcast New Settings if the flag actually flipped */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4251
/* Handle MGMT_OP_READ_CONTROLLER_CAP: report controller capabilities as a
 * list of EIR-style TLV entries (security flags, max encryption key sizes
 * and LE TX power range). The 20-byte stack buffer covers the response
 * header plus all entries appended below.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
4318
/* Experimental-feature UUIDs. Each table stores the UUID byte-reversed
 * (little-endian over-the-wire order) relative to the canonical string in
 * the comment preceding it.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};
4362
/* MGMT_OP_READ_EXP_FEATURES_INFO: list advertised experimental features
 * and a per-feature flags word (BIT(0) = enabled; the LL-privacy entry
 * additionally sets BIT(1)).
 *
 * @hdev may be NULL when the command targets the non-controller index; in
 * that case only index-less features (debug, ISO socket) are reported.
 * Also enables EXP_FEATURE_CHANGED events on this socket.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_exp_features_info *rp;
	size_t len;
	u16 idx = 0;
	u32 flags;
	int status;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enough space for 7 features */
	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
	rp = kzalloc(len, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && ll_privacy_capable(hdev)) {
		/* BIT(1) marks that toggling requires a powered-off state */
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (IS_ENABLED(CONFIG_BT_LE)) {
		flags = iso_enabled() ? BIT(0) : 0;
		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && lmp_le_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	/* Size the reply from the entry type instead of a hard-coded 20 so
	 * it stays in sync with the allocation above if the struct changes.
	 */
	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				   MGMT_OP_READ_EXP_FEATURES_INFO,
				   0, rp, sizeof(*rp) +
				   (sizeof(rp->features[0]) * idx));

	kfree(rp);
	return status;
}
4467
/* Broadcast EXP_FEATURE_CHANGED for the LL-privacy (RPA resolution)
 * feature and mirror the state into the default connection flags.
 */
static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;
	u32 flags = BIT(1);

	if (enabled)
		flags |= BIT(0);

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	ev.flags = cpu_to_le32(flags);

	/* NOTE(review): conn_flags is updated without explicit
	 * synchronization here - confirm whether readers can race.
	 */
	if (enabled && privacy_mode_capable(hdev))
		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
	else
		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev, &ev,
				  sizeof(ev), HCI_MGMT_EXP_FEATURE_EVENTS,
				  skip);
}
4488
/* Broadcast EXP_FEATURE_CHANGED for @uuid with BIT(0) reflecting the new
 * enabled state; @skip (may be NULL) does not receive the event.
 */
static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
			       bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;
	u32 flags = 0;

	if (enabled)
		flags = BIT(0);

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, uuid, 16);
	ev.flags = cpu_to_le32(flags);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev, &ev,
				  sizeof(ev), HCI_MGMT_EXP_FEATURE_EVENTS,
				  skip);
}
4502
/* Table-entry helper for exp_features[]: pairs a feature UUID with the
 * handler invoked for MGMT_OP_SET_EXP_FEATURE on that UUID.
 */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4508
4509 /* The zero key uuid is special. Multiple exp features are set through it. */
/* Handler for the all-zero UUID: disables every experimental feature that
 * supports being switched off this way (debug logging on the non-controller
 * index, LL privacy on a powered-off controller), emitting a change event
 * for each feature that actually flipped.
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	/* Reply echoes the zero UUID with no flags set */
	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	/* LL privacy can only be cleared while powered off */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4545
4546 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Experimental feature: toggle verbose Bluetooth debug logging. Valid only
 * on the non-controller index with a single boolean parameter octet.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = bt_dbg_get() != val;
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Notify other sockets only when the state actually changed */
	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
4591 #endif
4592
/* Experimental feature: enable/disable the experimental Mesh interface on
 * a controller. Disabling also clears the active HCI_MESH flag so a running
 * mesh session does not outlive the experimental opt-in.
 */
static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_MESH_EXPERIMENTAL);
	} else {
		/* Tear down any active mesh state along with the opt-in */
		hci_dev_clear_flag(hdev, HCI_MESH);
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_MESH_EXPERIMENTAL);
	}

	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);

	return err;
}
4643
/* Experimental feature: enable/disable LL privacy (controller-side RPA
 * resolution). Only allowed while powered off; enabling also clears the
 * advertising flag since software rotation and LL privacy conflict.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Event handler also syncs conn_flags with the new state */
	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4708
/* Experimental feature: enable/disable controller quality reports, via the
 * driver's set_quality_report hook when present, otherwise via the AOSP
 * vendor extension. Runs under the request-sync lock because the toggle may
 * issue HCI commands.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	/* Neither driver hook nor AOSP extension available */
	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		/* Driver hook takes precedence over the AOSP extension */
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		/* Record the new state only after the toggle succeeded */
		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4782
/* Experimental feature: enable/disable use of vendor codec offload. Only
 * supported when the driver provides a data path (get_data_path_id).
 */
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Offload requires the driver to expose a data path */
	if (!hdev->get_data_path_id)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);

	return err;
}
4840
/* Experimental feature: allow the controller to act as LE central and
 * peripheral at the same time. Requires the controller's LE state
 * combinations to support simultaneous roles.
 */
static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));

	/* Controller must report the needed LE state combinations */
	if (!hci_dev_le_state_simultaneous(hdev)) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);

	return err;
}
4898
4899 #ifdef CONFIG_BT_LE
/* Experimental feature: register/unregister the ISO socket protocol.
 * Valid only on the non-controller index with a single boolean octet.
 */
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed = false;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	err = val ? iso_init() : iso_exit();

	/* Only report a change when (un)registration succeeded */
	if (!err)
		changed = true;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, val, sk);

	return err;
}
4948 #endif
4949
/* Registry mapping experimental-feature UUIDs to their set handlers;
 * scanned linearly by set_exp_feature() and terminated by a NULL entry.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4971
/* MGMT_OP_SET_EXP_FEATURE: dispatch to the handler registered for the
 * UUID carried in the command payload.
 */
static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	const struct mgmt_exp_feature *feature;

	bt_dev_dbg(hdev, "sock %p", sk);

	for (feature = exp_features; feature->uuid; feature++) {
		if (!memcmp(cp->uuid, feature->uuid, 16))
			return feature->set_func(sk, hdev, cp, data_len);
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}
4989
/* Compute the device-flag mask actually supported for an LE connection
 * parameter entry, starting from the controller-wide conn_flags.
 */
static u32 get_params_flags(struct hci_dev *hdev,
			    struct hci_conn_params *params)
{
	u32 flags = hdev->conn_flags;

	/* Devices using RPAs can only be programmed in the acceptlist if
	 * LL Privacy has been enabled, otherwise they cannot mark
	 * HCI_CONN_FLAG_REMOTE_WAKEUP.
	 */
	if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
		flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;

	return flags;
}
5005
/* MGMT_OP_GET_DEVICE_FLAGS: report the supported and current device flags
 * for a BR/EDR accept-list entry or an LE connection-parameters entry.
 * Returns INVALID_PARAMS when the device is unknown.
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	/* Default to the controller-wide mask; LE entries may narrow it */
	supported_flags = hdev->conn_flags;

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (!params)
			goto done;

		supported_flags = get_params_flags(hdev, params);
		current_flags = params->flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				 &rp, sizeof(rp));
}
5057
/* Broadcast MGMT_EV_DEVICE_FLAGS_CHANGED to everyone except @sk (the
 * socket that issued the change).
 */
static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev = {
		.addr.type = bdaddr_type,
		.supported_flags = cpu_to_le32(supported_flags),
		.current_flags = cpu_to_le32(current_flags),
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}
5071
/* MGMT_OP_SET_DEVICE_FLAGS: update the device flags of a BR/EDR accept-list
 * entry or an LE connection-parameters entry. On success a
 * DEVICE_FLAGS_CHANGED event is broadcast to all other sockets.
 *
 * Fix: hdev->conn_flags was previously read before taking hci_dev_lock()
 * (noted by an in-code review comment), so a concurrent conn_flags update
 * could race the validation. Take the lock first and read under it.
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	hci_dev_lock(hdev);

	/* Read conn_flags under the lock so a concurrent update cannot race
	 * the validation below.
	 */
	supported_flags = hdev->conn_flags;

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
		goto unlock;
	}

	/* An LE entry may support fewer flags than the controller default,
	 * so re-validate against the per-params mask.
	 */
	supported_flags = get_params_flags(hdev, params);

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	WRITE_ONCE(params->flags, current_flags);
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}
5148
/* Broadcast MGMT_EV_ADV_MONITOR_ADDED for @handle to everyone except @sk. */
static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
				   u16 handle)
{
	struct mgmt_ev_adv_monitor_added ev = {
		.monitor_handle = cpu_to_le16(handle),
	};

	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
}
5158
/* Broadcast MGMT_EV_ADV_MONITOR_REMOVED for @handle.
 *
 * If a Remove Adv Monitor command is pending for a specific (non-zero)
 * handle, its issuing socket is skipped: that socket is informed through
 * the command reply instead of the event.
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		/* Handle 0 means "remove all"; only skip for a specific one */
		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
5178
/* MGMT_OP_READ_ADV_MONITOR_FEATURES: report supported/enabled monitor
 * features plus the handles of all registered monitors.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	/* assumes the idr never holds more than
	 * HCI_MAX_ADV_MONITOR_NUM_HANDLES entries (enforced when monitors
	 * are registered) - TODO confirm against hci_add_adv_monitor()
	 */
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
5227
/* Completion callback for the queued Add Adv Patterns Monitor work: on
 * success registers the monitor's handle with userspace (event + counters)
 * and refreshes passive scanning, then replies to the pending command.
 */
static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5255
mgmt_add_adv_patterns_monitor_sync(struct hci_dev * hdev,void * data)5256 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5257 {
5258 struct mgmt_pending_cmd *cmd = data;
5259 struct adv_monitor *monitor = cmd->user_data;
5260
5261 return hci_add_adv_monitor(hdev, monitor);
5262 }
5263
/* Common tail of the Add Adv Patterns Monitor commands: queues the monitor
 * registration on the cmd_sync machinery.
 *
 * Takes ownership of @m: on any failure path it is freed via
 * hci_free_adv_monitor() before the error status is returned. @status may
 * arrive non-zero from the caller's parsing stage, in which case it is
 * reported directly.
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	if (status)
		goto unlock;

	/* Serialize against other monitor/LE state changing commands */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	/* Monitor ownership moves to the pending command / sync work */
	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
5311
parse_adv_monitor_rssi(struct adv_monitor * m,struct mgmt_adv_rssi_thresholds * rssi)5312 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5313 struct mgmt_adv_rssi_thresholds *rssi)
5314 {
5315 if (rssi) {
5316 m->rssi.low_threshold = rssi->low_threshold;
5317 m->rssi.low_threshold_timeout =
5318 __le16_to_cpu(rssi->low_threshold_timeout);
5319 m->rssi.high_threshold = rssi->high_threshold;
5320 m->rssi.high_threshold_timeout =
5321 __le16_to_cpu(rssi->high_threshold_timeout);
5322 m->rssi.sampling_period = rssi->sampling_period;
5323 } else {
5324 /* Default values. These numbers are the least constricting
5325 * parameters for MSFT API to work, so it behaves as if there
5326 * are no rssi parameter to consider. May need to be changed
5327 * if other API are to be supported.
5328 */
5329 m->rssi.low_threshold = -127;
5330 m->rssi.low_threshold_timeout = 60;
5331 m->rssi.high_threshold = -127;
5332 m->rssi.high_threshold_timeout = 0;
5333 m->rssi.sampling_period = 0;
5334 }
5335 }
5336
/* Validate and copy @pattern_count patterns from userspace layout into
 * monitor @m's pattern list. Each pattern must fit entirely inside the
 * extended advertising data area. Returns an MGMT status code.
 */
static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
				    struct mgmt_adv_pattern *patterns)
{
	int i;

	for (i = 0; i < pattern_count; i++) {
		struct mgmt_adv_pattern *src = &patterns[i];
		struct adv_pattern *p;

		if (src->offset >= HCI_MAX_EXT_AD_LENGTH ||
		    src->length > HCI_MAX_EXT_AD_LENGTH ||
		    (src->offset + src->length) > HCI_MAX_EXT_AD_LENGTH)
			return MGMT_STATUS_INVALID_PARAMS;

		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return MGMT_STATUS_NO_RESOURCES;

		p->ad_type = src->ad_type;
		p->offset = src->offset;
		p->length = src->length;
		memcpy(p->value, src->value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	return MGMT_STATUS_SUCCESS;
}
5367
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR handler (no RSSI thresholds).
 *
 * Validates that @len matches the fixed header plus pattern_count pattern
 * entries, builds an adv_monitor with default RSSI parameters and hands it
 * (with ownership) to __add_adv_patterns_monitor().
 */
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* At least one pattern must follow the fixed header. */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	/* NULL selects the default (least restrictive) RSSI thresholds. */
	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	/* __add_adv_patterns_monitor() frees @m on any non-zero status. */
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}
5404
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI handler.
 *
 * Same as add_adv_patterns_monitor() but the command additionally carries
 * explicit RSSI thresholds which are copied into the monitor.
 */
static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* At least one pattern must follow the fixed header. */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	/* Use the caller-supplied RSSI thresholds. */
	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	/* __add_adv_patterns_monitor() frees @m on any non-zero status. */
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}
5441
/* Completion callback for Remove Adv Monitor: echo the requested handle
 * back to userspace and refresh passive scanning on success.
 */
static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;

	hci_dev_lock(hdev);

	/* Handle is already little-endian in the request; copied as-is. */
	rp.monitor_handle = cp->monitor_handle;

	if (!status)
		hci_update_passive_scan(hdev);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5464
mgmt_remove_adv_monitor_sync(struct hci_dev * hdev,void * data)5465 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5466 {
5467 struct mgmt_pending_cmd *cmd = data;
5468 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5469 u16 handle = __le16_to_cpu(cp->monitor_handle);
5470
5471 if (!handle)
5472 return hci_remove_all_adv_monitor(hdev);
5473
5474 return hci_remove_single_adv_monitor(hdev, handle);
5475 }
5476
/* MGMT_OP_REMOVE_ADV_MONITOR handler: queue monitor removal on the
 * cmd_sync queue. The reply is sent from
 * mgmt_remove_adv_monitor_complete().
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err, status;

	hci_dev_lock(hdev);

	/* Only one monitor (or LE state) operation may be in flight. */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	/* NOTE(review): submit (not queue) is used here, unlike the add
	 * path — presumably deliberate; confirm against hci_sync semantics.
	 */
	err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
				  mgmt_remove_adv_monitor_complete);

	if (err) {
		mgmt_pending_remove(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
5522
/* Completion callback for Read Local OOB Data: translate the HCI reply
 * skb (legacy or Secure Connections variant) into the mgmt response.
 *
 * cmd->skb may be NULL, an ERR_PTR, or a valid reply whose first byte is
 * the HCI status — all three are mapped to an MGMT status first.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		/* Legacy reply: only P-192 hash/rand are available, so the
		 * response is truncated to exclude the P-256 fields.
		 */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended reply: both P-192 and P-256 values present. */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5589
read_local_oob_data_sync(struct hci_dev * hdev,void * data)5590 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5591 {
5592 struct mgmt_pending_cmd *cmd = data;
5593
5594 if (bredr_sc_enabled(hdev))
5595 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5596 else
5597 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5598
5599 if (IS_ERR(cmd->skb))
5600 return PTR_ERR(cmd->skb);
5601 else
5602 return 0;
5603 }
5604
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: requires a powered, SSP-capable
 * controller; the actual HCI exchange runs on the cmd_sync queue and the
 * reply is sent from read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* mgmt_pending_new() (not _add) — the cmd is not tracked on the
	 * pending list and is freed in the completion callback.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5646
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler.
 *
 * Two command layouts are accepted, distinguished by @len:
 *  - legacy (P-192 only), restricted to BR/EDR addresses;
 *  - extended (P-192 + P-256), where all-zero key material disables the
 *    corresponding OOB data set.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy layout: P-192 hash/rand only. */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended layout: P-192 and P-256 hash/rand pairs. */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5754
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: drop stored OOB data for one
 * BR/EDR address, or for all addresses when BDADDR_ANY is given.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Remote OOB data is only kept for BR/EDR addresses. */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* BDADDR_ANY wipes the entire remote OOB store. */
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
	} else if (hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type) < 0) {
		status = MGMT_STATUS_INVALID_PARAMS;
	} else {
		status = MGMT_STATUS_SUCCESS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
5791
/* Finish whichever discovery-start command (regular, service or limited)
 * is currently pending, if any, with the given HCI @status.
 */
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	static const u16 ops[] = {
		MGMT_OP_START_DISCOVERY,
		MGMT_OP_START_SERVICE_DISCOVERY,
		MGMT_OP_START_LIMITED_DISCOVERY,
	};
	struct mgmt_pending_cmd *cmd = NULL;
	size_t i;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	/* At most one of these opcodes can be pending at a time. */
	for (i = 0; !cmd && i < ARRAY_SIZE(ops); i++)
		cmd = pending_find(ops[i], hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
5814
discovery_type_is_valid(struct hci_dev * hdev,uint8_t type,uint8_t * mgmt_status)5815 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5816 uint8_t *mgmt_status)
5817 {
5818 switch (type) {
5819 case DISCOV_TYPE_LE:
5820 *mgmt_status = mgmt_le_support(hdev);
5821 if (*mgmt_status)
5822 return false;
5823 break;
5824 case DISCOV_TYPE_INTERLEAVED:
5825 *mgmt_status = mgmt_le_support(hdev);
5826 if (*mgmt_status)
5827 return false;
5828 fallthrough;
5829 case DISCOV_TYPE_BREDR:
5830 *mgmt_status = mgmt_bredr_support(hdev);
5831 if (*mgmt_status)
5832 return false;
5833 break;
5834 default:
5835 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5836 return false;
5837 }
5838
5839 return true;
5840 }
5841
/* cmd_sync completion callback shared by all discovery-start flavours. */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Only respond if @cmd is still the pending command; it may already
	 * have been completed/cancelled elsewhere.
	 */
	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* The reply payload is the single discovery-type byte from the
	 * original request, stored at the start of cmd->param.
	 */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
				DISCOVERY_FINDING);
}
5860
/* hci_cmd_sync work: start discovery; @data (the pending cmd) is handled
 * by start_discovery_complete() and is unused here.
 */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5865
/* Common implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (@op selects which).
 *
 * Rejects the request when the controller is off, discovery is already
 * running/paused, or the type is unsupported; otherwise queues the start
 * on the cmd_sync queue and moves discovery to DISCOVERY_STARTING.
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5936
/* MGMT_OP_START_DISCOVERY handler: regular (non-limited) discovery. */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5943
/* MGMT_OP_START_LIMITED_DISCOVERY handler: limited-mode discovery. */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5951
/* MGMT_OP_START_SERVICE_DISCOVERY handler.
 *
 * Like start_discovery_internal() but the command additionally carries an
 * RSSI threshold and a list of 128-bit service UUIDs (16 bytes each) used
 * to filter results. The UUID list is duplicated into
 * hdev->discovery.uuids and freed by hci_discovery_filter_clear().
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Largest uuid_count that cannot overflow the u16 length check. */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
6063
/* Finish a pending MGMT_OP_STOP_DISCOVERY command, if any, with the
 * given HCI @status.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
6080
/* cmd_sync completion callback for Stop Discovery. */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Only respond if @cmd is still the pending command. */
	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* The reply payload is the single discovery-type byte stored at the
	 * start of cmd->param.
	 */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
6097
/* hci_cmd_sync work: stop discovery; @data (the pending cmd) is handled
 * by stop_discovery_complete() and is unused here.
 */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}
6102
/* MGMT_OP_STOP_DISCOVERY handler: requires an active discovery of the
 * matching type; queues the stop on the cmd_sync queue and moves the
 * discovery state machine to DISCOVERY_STOPPING.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the type that was started. */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6147
/* MGMT_OP_CONFIRM_NAME handler: during discovery, userspace tells the
 * kernel whether the name of a found device is already known, which
 * decides whether a remote name request is still needed for it.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	/* The address must refer to a cache entry with unknown name state. */
	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		/* Name already known: drop the entry from the resolve list. */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		/* Name still needed: schedule it for name resolution. */
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
6189
/* MGMT_OP_BLOCK_DEVICE handler: add the address to the reject list and
 * broadcast a Device Blocked event to the other mgmt sockets.
 */
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
				cp->addr.type) < 0) {
		status = MGMT_STATUS_FAILED;
	} else {
		/* The event is skipped for the socket issuing the command. */
		mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);
		status = MGMT_STATUS_SUCCESS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
6225
/* MGMT_OP_UNBLOCK_DEVICE handler: remove the address from the reject
 * list and broadcast a Device Unblocked event to the other mgmt sockets.
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
				cp->addr.type) < 0) {
		/* Deletion fails only when the entry does not exist. */
		status = MGMT_STATUS_INVALID_PARAMS;
	} else {
		/* The event is skipped for the socket issuing the command. */
		mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);
		status = MGMT_STATUS_SUCCESS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
6261
/* hci_cmd_sync work: regenerate the EIR data so the updated Device ID
 * record is picked up; @data is unused.
 */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
6266
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID record fields and
 * queue an EIR refresh so the new record is advertised.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	__u16 source = __le16_to_cpu(cp->source);
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Valid sources: 0 (disabled), 1 (Bluetooth SIG), 2 (USB IF). */
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);

	hci_dev_unlock(hdev);

	return err;
}
6298
/* Log the outcome of re-enabling instance advertising. */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err) {
		bt_dev_dbg(hdev, "status %d", err);
		return;
	}

	bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
6306
/* cmd_sync completion callback for Set Advertising: sync the
 * HCI_ADVERTISING flag with the controller state, answer all pending
 * Set Advertising commands, emit New Settings, and — when advertising was
 * just disabled but instances exist — re-enable instance advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* Fail every pending Set Advertising command with @status. */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	/* settings_rsp records one of the replying sockets in match.sk. */
	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance: fall back to the first one. */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
6354
set_adv_sync(struct hci_dev * hdev,void * data)6355 static int set_adv_sync(struct hci_dev *hdev, void *data)
6356 {
6357 struct mgmt_pending_cmd *cmd = data;
6358 struct mgmt_mode *cp = cmd->param;
6359 u8 val = !!cp->val;
6360
6361 if (cp->val == 0x02)
6362 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6363 else
6364 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6365
6366 cancel_adv_timeout(hdev);
6367
6368 if (val) {
6369 /* Switch to instance "0" for the Set Advertising setting.
6370 * We cannot use update_[adv|scan_rsp]_data() here as the
6371 * HCI_ADVERTISING flag is not yet set.
6372 */
6373 hdev->cur_adv_instance = 0x00;
6374
6375 if (ext_adv_capable(hdev)) {
6376 hci_start_ext_adv_sync(hdev, 0x00);
6377 } else {
6378 hci_update_adv_data_sync(hdev, 0x00);
6379 hci_update_scan_rsp_data_sync(hdev, 0x00);
6380 hci_enable_advertising_sync(hdev);
6381 }
6382 } else {
6383 hci_disable_advertising_sync(hdev);
6384 }
6385
6386 return 0;
6387 }
6388
/* Handler for the Set Advertising mgmt command (val: 0x00 = off,
 * 0x01 = on, 0x02 = on and connectable). Either toggles the setting
 * flags directly or queues the HCI work through set_adv_sync().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* LE must be supported and enabled for this command */
	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Advertising is temporarily paused; refuse until resumed */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Broadcast New Settings only if a flag actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one Set Advertising / Set LE may be in flight at a time */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	/* Undo the pending entry if queueing failed */
	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6473
/* Handler for the Set Static Address mgmt command. Only allowed on LE
 * capable controllers while powered off; BDADDR_ANY clears the address.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		/* A real static address must not be BDADDR_NONE and must
		 * have its two most significant bits set.
		 */
		bool valid = bacmp(&cp->bdaddr, BDADDR_NONE) &&
			     (cp->bdaddr.b[5] & 0xc0) == 0xc0;

		if (!valid)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6517
/* Handler for the Set Scan Parameters mgmt command. Stores the LE scan
 * interval/window and restarts a running background scan so the new
 * values take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);
	window = __le16_to_cpu(cp->window);

	/* Both values must lie in the spec range 0x0004-0x4000 and the
	 * window must not exceed the interval.
	 */
	if (interval < 0x0004 || interval > 0x4000 ||
	    window < 0x0004 || window > 0x4000 ||
	    window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED)
		hci_update_passive_scan(hdev);

	hci_dev_unlock(hdev);

	return err;
}
6566
fast_connectable_complete(struct hci_dev * hdev,void * data,int err)6567 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6568 {
6569 struct mgmt_pending_cmd *cmd = data;
6570
6571 bt_dev_dbg(hdev, "err %d", err);
6572
6573 if (err) {
6574 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6575 mgmt_status(err));
6576 } else {
6577 struct mgmt_mode *cp = cmd->param;
6578
6579 if (cp->val)
6580 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6581 else
6582 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6583
6584 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6585 new_settings(hdev, cmd->sk);
6586 }
6587
6588 mgmt_pending_free(cmd);
6589 }
6590
write_fast_connectable_sync(struct hci_dev * hdev,void * data)6591 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6592 {
6593 struct mgmt_pending_cmd *cmd = data;
6594 struct mgmt_mode *cp = cmd->param;
6595
6596 return hci_write_fast_connectable_sync(hdev, cp->val);
6597 }
6598
/* Handler for the Set Fast Connectable mgmt command. Requires BR/EDR
 * enabled and controller version >= 1.2; queues the page-scan change
 * through write_fast_connectable_sync() when powered.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No-op if the flag already matches the request */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* While powered off just flip the flag; no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		/* Release the pending entry that will never complete */
		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6654
set_bredr_complete(struct hci_dev * hdev,void * data,int err)6655 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6656 {
6657 struct mgmt_pending_cmd *cmd = data;
6658
6659 bt_dev_dbg(hdev, "err %d", err);
6660
6661 if (err) {
6662 u8 mgmt_err = mgmt_status(err);
6663
6664 /* We need to restore the flag if related HCI commands
6665 * failed.
6666 */
6667 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6668
6669 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6670 } else {
6671 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6672 new_settings(hdev, cmd->sk);
6673 }
6674
6675 mgmt_pending_free(cmd);
6676 }
6677
set_bredr_sync(struct hci_dev * hdev,void * data)6678 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6679 {
6680 int status;
6681
6682 status = hci_write_fast_connectable_sync(hdev, false);
6683
6684 if (!status)
6685 status = hci_update_scan_sync(hdev);
6686
6687 /* Since only the advertising data flags will change, there
6688 * is no need to update the scan response data.
6689 */
6690 if (!status)
6691 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6692
6693 return status;
6694 }
6695
/* Handler for the Set BR/EDR mgmt command on dual-mode controllers.
 * While powered the command may only enable BR/EDR, and only when no
 * static address or secure connections setting forbids it.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only meaningful on dual-mode (BR/EDR + LE) controllers */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No-op if the flag already matches the request */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	/* While powered off, just toggle flags; disabling BR/EDR also
	 * clears the BR/EDR-only settings that depend on it.
	 */
	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6795
set_secure_conn_complete(struct hci_dev * hdev,void * data,int err)6796 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6797 {
6798 struct mgmt_pending_cmd *cmd = data;
6799 struct mgmt_mode *cp;
6800
6801 bt_dev_dbg(hdev, "err %d", err);
6802
6803 if (err) {
6804 u8 mgmt_err = mgmt_status(err);
6805
6806 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6807 goto done;
6808 }
6809
6810 cp = cmd->param;
6811
6812 switch (cp->val) {
6813 case 0x00:
6814 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6815 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6816 break;
6817 case 0x01:
6818 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6819 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6820 break;
6821 case 0x02:
6822 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6823 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6824 break;
6825 }
6826
6827 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6828 new_settings(hdev, cmd->sk);
6829
6830 done:
6831 mgmt_pending_free(cmd);
6832 }
6833
set_secure_conn_sync(struct hci_dev * hdev,void * data)6834 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6835 {
6836 struct mgmt_pending_cmd *cmd = data;
6837 struct mgmt_mode *cp = cmd->param;
6838 u8 val = !!cp->val;
6839
6840 /* Force write of val */
6841 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6842
6843 return hci_write_sc_support_sync(hdev, val);
6844 }
6845
/* Handler for the Set Secure Connections mgmt command (val: 0x00 off,
 * 0x01 on, 0x02 SC-only). Toggles the flags directly when the
 * controller cannot or need not be told, otherwise queues HCI work.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Needs either controller SC support or LE enabled (LE SC is a
	 * host feature).
	 */
	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* BR/EDR SC requires SSP to be enabled first */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Flag-only path: powered off, no controller SC support, or
	 * BR/EDR disabled — no HCI command needed.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* No-op if both the enabled and SC-only state already match */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6926
/* Handler for the Set Debug Keys mgmt command (val: 0x00 off, 0x01 keep
 * debug keys, 0x02 also use SSP debug mode on the controller).
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* 0x01 and 0x02 both keep debug keys in the key store */
	changed = cp->val ?
		!hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS) :
		hci_dev_test_and_clear_flag(hdev, HCI_KEEP_DEBUG_KEYS);

	/* Only 0x02 switches the local pairing to debug keys */
	use_changed = (cp->val == 0x02) ?
		!hci_dev_test_and_set_flag(hdev, HCI_USE_DEBUG_KEYS) :
		hci_dev_test_and_clear_flag(hdev, HCI_USE_DEBUG_KEYS);

	/* Push the SSP debug mode to the controller when it changed */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;

		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6973
/* Handler for the Set Privacy mgmt command (privacy: 0x00 off, 0x01 on,
 * 0x02 limited privacy). Stores the supplied IRK and updates the RPA
 * related flags; only allowed while powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The privacy setting may only change while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		/* Adopt the caller-provided identity resolving key and
		 * force generation of a fresh RPA.
		 */
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		/* Wipe the stored IRK when privacy is turned off */
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7030
irk_is_valid(struct mgmt_irk_info * irk)7031 static bool irk_is_valid(struct mgmt_irk_info *irk)
7032 {
7033 switch (irk->addr.type) {
7034 case BDADDR_LE_PUBLIC:
7035 return true;
7036
7037 case BDADDR_LE_RANDOM:
7038 /* Two most significant bits shall be set */
7039 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7040 return false;
7041 return true;
7042 }
7043
7044 return false;
7045 }
7046
/* Handler for the Load IRKs mgmt command: validate the whole list first,
 * then replace the stored identity resolving keys wholesale.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound on entries that can fit into a u16-sized payload */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared entry count exactly */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Reject the whole command if any single entry is invalid; this
	 * happens before the existing keys are touched.
	 */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* Full replacement: drop all previously stored IRKs */
	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		/* Administratively blocked keys are skipped, not stored */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7117
ltk_is_valid(struct mgmt_ltk_info * key)7118 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7119 {
7120 if (key->initiator != 0x00 && key->initiator != 0x01)
7121 return false;
7122
7123 switch (key->addr.type) {
7124 case BDADDR_LE_PUBLIC:
7125 return true;
7126
7127 case BDADDR_LE_RANDOM:
7128 /* Two most significant bits shall be set */
7129 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7130 return false;
7131 return true;
7132 }
7133
7134 return false;
7135 }
7136
/* Handler for the Load Long Term Keys mgmt command: replaces all stored
 * LTKs with the supplied list, skipping blocked, invalid and debug keys.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound on entries that can fit into a u16-sized payload */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				      MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				      MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared entry count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				      MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	hci_dev_lock(hdev);

	/* Full replacement: drop all previously stored LTKs */
	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		/* Administratively blocked keys are skipped, not stored */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Unlike load_irks(), invalid entries here only skip the
		 * entry instead of failing the whole command.
		 */
		if (!ltk_is_valid(key)) {
			bt_dev_warn(hdev, "Invalid LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Debug keys deliberately fall through into the
			 * default case and are skipped (never stored).
			 */
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7229
get_conn_info_complete(struct hci_dev * hdev,void * data,int err)7230 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7231 {
7232 struct mgmt_pending_cmd *cmd = data;
7233 struct hci_conn *conn = cmd->user_data;
7234 struct mgmt_cp_get_conn_info *cp = cmd->param;
7235 struct mgmt_rp_get_conn_info rp;
7236 u8 status;
7237
7238 bt_dev_dbg(hdev, "err %d", err);
7239
7240 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7241
7242 status = mgmt_status(err);
7243 if (status == MGMT_STATUS_SUCCESS) {
7244 rp.rssi = conn->rssi;
7245 rp.tx_power = conn->tx_power;
7246 rp.max_tx_power = conn->max_tx_power;
7247 } else {
7248 rp.rssi = HCI_RSSI_INVALID;
7249 rp.tx_power = HCI_TX_POWER_INVALID;
7250 rp.max_tx_power = HCI_TX_POWER_INVALID;
7251 }
7252
7253 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7254 &rp, sizeof(rp));
7255
7256 mgmt_pending_free(cmd);
7257 }
7258
/* hci_cmd_sync work for Get Connection Information: refresh RSSI and,
 * when needed, TX power values for the queried connection.
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16 handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Stash the connection for get_conn_info_complete() */
	cmd->user_data = conn;
	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
7296
/* Handler for the Get Connection Information mgmt command. Serves RSSI
 * and TX power from the per-connection cache when fresh enough,
 * otherwise queues a controller query via get_conn_info_sync().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Every reply echoes back the queried address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Look up the connection on the transport matching the address */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
						 hdev->conn_info_max_age - 1);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		/* Mark the cache as refreshed now that the query is queued */
		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7387
get_clock_info_complete(struct hci_dev * hdev,void * data,int err)7388 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7389 {
7390 struct mgmt_pending_cmd *cmd = data;
7391 struct mgmt_cp_get_clock_info *cp = cmd->param;
7392 struct mgmt_rp_get_clock_info rp;
7393 struct hci_conn *conn = cmd->user_data;
7394 u8 status = mgmt_status(err);
7395
7396 bt_dev_dbg(hdev, "err %d", err);
7397
7398 memset(&rp, 0, sizeof(rp));
7399 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7400 rp.addr.type = cp->addr.type;
7401
7402 if (err)
7403 goto complete;
7404
7405 rp.local_clock = cpu_to_le32(hdev->clock);
7406
7407 if (conn) {
7408 rp.piconet_clock = cpu_to_le32(conn->clock);
7409 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7410 }
7411
7412 complete:
7413 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7414 sizeof(rp));
7415
7416 mgmt_pending_free(cmd);
7417 }
7418
/* hci_cmd_sync work for Get Clock Information: read the local clock
 * first, then the piconet clock of the queried connection.
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn;

	/* First read with zeroed parameters: handle 0 and which = 0x00,
	 * i.e. the local clock. Its result is ignored here; the value is
	 * picked up from hdev->clock in get_clock_info_complete().
	 */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_read_clock_sync(hdev, &hci_cp);

	/* Make sure connection still exists */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Stash the connection for get_clock_info_complete() */
	cmd->user_data = conn;
	hci_cp.handle = cpu_to_le16(conn->handle);
	hci_cp.which = 0x01; /* Piconet clock */

	return hci_read_clock_sync(hdev, &hci_cp);
}
7440
/* Handler for MGMT_OP_GET_CLOCK_INFO: validate the request, make sure
 * the adapter is powered and (for non-wildcard addresses) that the
 * connection exists, then queue the synchronous clock read.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-fill the response with the requested address for error paths */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only defined for BR/EDR links */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* BDADDR_ANY requests only the local clock; otherwise the named
	 * connection must currently be established.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	/* On queueing failure, answer immediately and drop the pending cmd */
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
7504
is_connected(struct hci_dev * hdev,bdaddr_t * addr,u8 type)7505 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7506 {
7507 struct hci_conn *conn;
7508
7509 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7510 if (!conn)
7511 return false;
7512
7513 if (conn->dst_type != type)
7514 return false;
7515
7516 if (conn->state != BT_CONNECTED)
7517 return false;
7518
7519 return true;
7520 }
7521
/* This function requires the caller holds hdev->lock */
/* Create (or look up) the connection parameters for @addr/@addr_type and
 * move them onto the pending-connection or pending-report list matching
 * the requested @auto_connect policy. Returns 0 on success, -EIO if the
 * params entry could not be allocated.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do if the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Remove from whichever pending list the entry is on before
	 * re-filing it below.
	 */
	hci_pend_le_list_del_init(params);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-progress explicit connect takes precedence over
		 * passive reporting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		else
			hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connection attempt if not already connected */
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
7566
/* Emit a MGMT_EV_DEVICE_ADDED event to all mgmt sockets except @sk. */
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev = {
		.addr.type = type,
		.action = action,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}
7578
/* hci_cmd_sync callback for Add Device: re-evaluate passive scanning so
 * the updated accept/report lists take effect. @data is unused.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7583
/* Handler for MGMT_OP_ADD_DEVICE.
 *
 * For BR/EDR addresses only action 0x01 (allow incoming connections) is
 * supported and the device is put on the accept list. For LE addresses
 * the action selects the auto-connect policy:
 *   0x00 -> HCI_AUTO_CONN_REPORT
 *   0x01 -> HCI_AUTO_CONN_DIRECT
 *   0x02 -> HCI_AUTO_CONN_ALWAYS
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Address must be of a known type and not the wildcard address */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		/* Re-evaluate page scan now that the accept list changed */
		hci_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	/* Kick passive scanning so the new policy takes effect */
	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7685
/* Emit a MGMT_EV_DEVICE_REMOVED event to all mgmt sockets except @sk. */
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev = {
		.addr.type = type,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}
7696
/* hci_cmd_sync callback for Remove Device: re-evaluate passive scanning
 * after entries were dropped from the lists. @data is unused.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7701
/* Handler for MGMT_OP_REMOVE_DEVICE.
 *
 * A concrete address removes that single device (BR/EDR accept-list
 * entry or LE connection parameters). BDADDR_ANY with address type 0
 * clears the whole accept list and all removable LE conn params.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			/* Accept list changed; refresh page scan state */
			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries that mgmt did not add (disabled or explicit
		 * connect) cannot be removed through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_free(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Wildcard removal requires address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep params for a connection attempt in progress */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			hci_conn_params_free(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7825
conn_update_sync(struct hci_dev * hdev,void * data)7826 static int conn_update_sync(struct hci_dev *hdev, void *data)
7827 {
7828 struct hci_conn_params *params = data;
7829 struct hci_conn *conn;
7830
7831 conn = hci_conn_hash_lookup_le(hdev, ¶ms->addr, params->addr_type);
7832 if (!conn)
7833 return -ECANCELED;
7834
7835 return hci_le_conn_update_sync(hdev, conn, params);
7836 }
7837
/* Handler for MGMT_OP_LOAD_CONN_PARAM: replace the stored LE connection
 * parameters with the list supplied by userspace, and for a single-entry
 * update of an existing device, trigger the connection update procedure.
 *
 * Fix: two lookup arguments were mis-encoded as "¶m->addr.bdaddr"
 * (HTML entity corruption of "&param->addr.bdaddr"), which does not
 * compile.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Reject counts whose encoded length would overflow u16 */
	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	/* A multi-entry load replaces the previous set wholesale */
	if (param_count > 1)
		hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		bool update = false;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		/* Detect when the loading is for an existing parameter then
		 * attempt to trigger the connection update procedure.
		 */
		if (!i && param_count == 1) {
			hci_param = hci_conn_params_lookup(hdev,
							   &param->addr.bdaddr,
							   addr_type);
			if (hci_param)
				update = true;
			else
				hci_conn_params_clear_disabled(hdev);
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;

		/* Check if we need to trigger a connection update */
		if (update) {
			struct hci_conn *conn;

			/* Lookup for existing connection as central and check
			 * if parameters match and if they don't then trigger
			 * a connection update.
			 */
			conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
						       addr_type);
			if (conn && conn->role == HCI_ROLE_MASTER &&
			    (conn->le_conn_min_interval != min ||
			     conn->le_conn_max_interval != max ||
			     conn->le_conn_latency != latency ||
			     conn->le_supv_timeout != timeout))
				hci_cmd_sync_queue(hdev, conn_update_sync,
						   hci_param, NULL);
		}
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7956
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG: toggle the HCI_EXT_CONFIGURED
 * flag on a quirk-enabled, powered-off controller, and move the index
 * between the configured and unconfigured lists when the effective
 * configuration state changes.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the UNCONFIGURED flag now disagrees with the actual configured
	 * state, the index must switch lists (configured <-> unconfigured).
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			/* Became configured: power on to finish setup */
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			/* Became unconfigured: expose as raw/unconfigured */
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8012
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS: store a public address for a
 * powered-off controller whose driver provides a set_bdaddr hook, and
 * if that completes configuration, re-register the index and power on.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* The wildcard address cannot be used as a public address */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* Providing the address may have been the last missing piece of
	 * configuration: switch to the configured index and power on.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8064
/* Completion callback for the BR/EDR leg of MGMT_OP_READ_LOCAL_OOB_EXT_DATA:
 * convert the HCI Read Local OOB (Extended) Data reply in cmd->skb into
 * an EIR-encoded mgmt response, notify OOB-data listeners on success,
 * and release the pending command.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Bail out if the command was already cancelled/completed */
	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	/* Derive a status from the skb when the sync request itself
	 * succeeded: missing skb, error pointer, or HCI status byte.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy (non-Secure-Connections) reply: P-192 values only */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended reply: P-256 values, plus P-192 unless SC-only */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* eir_len == 0 implies an error status; skip EIR generation (the
	 * hash/rand pointers are not valid on those paths).
	 */
	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	/* Broadcast the fresh OOB data to interested listeners, except
	 * the requesting socket.
	 */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
8187
read_local_ssp_oob_req(struct hci_dev * hdev,struct sock * sk,struct mgmt_cp_read_local_oob_ext_data * cp)8188 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8189 struct mgmt_cp_read_local_oob_ext_data *cp)
8190 {
8191 struct mgmt_pending_cmd *cmd;
8192 int err;
8193
8194 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8195 cp, sizeof(*cp));
8196 if (!cmd)
8197 return -ENOMEM;
8198
8199 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8200 read_local_oob_ext_data_complete);
8201
8202 if (err < 0) {
8203 mgmt_pending_remove(cmd);
8204 return err;
8205 }
8206
8207 return 0;
8208 }
8209
/* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA.
 *
 * For BR/EDR with SSP enabled the reply is produced asynchronously via
 * read_local_ssp_oob_req(); for LE (and BR/EDR without SSP) the EIR
 * payload is assembled inline and answered immediately.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Determine the worst-case EIR length up front so the response
	 * buffer can be sized before taking the lock.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* SSP data must be read from the controller; the
			 * reply is sent from the completion callback.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick the address the peer will see: static random
		 * (addr[6] = 0x01) or public (addr[6] = 0x00).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* LE role: 0x02 when advertising (peripheral preferred),
		 * 0x01 otherwise.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Notify other OOB-data listeners of the fresh values */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
8370
get_supported_adv_flags(struct hci_dev * hdev)8371 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8372 {
8373 u32 flags = 0;
8374
8375 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8376 flags |= MGMT_ADV_FLAG_DISCOV;
8377 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8378 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8379 flags |= MGMT_ADV_FLAG_APPEARANCE;
8380 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8381 flags |= MGMT_ADV_PARAM_DURATION;
8382 flags |= MGMT_ADV_PARAM_TIMEOUT;
8383 flags |= MGMT_ADV_PARAM_INTERVALS;
8384 flags |= MGMT_ADV_PARAM_TX_POWER;
8385 flags |= MGMT_ADV_PARAM_SCAN_RSP;
8386
8387 /* In extended adv TX_POWER returned from Set Adv Param
8388 * will be always valid.
8389 */
8390 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8391 flags |= MGMT_ADV_FLAG_TX_POWER;
8392
8393 if (ext_adv_capable(hdev)) {
8394 flags |= MGMT_ADV_FLAG_SEC_1M;
8395 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8396 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8397
8398 if (le_2m_capable(hdev))
8399 flags |= MGMT_ADV_FLAG_SEC_2M;
8400
8401 if (le_coded_capable(hdev))
8402 flags |= MGMT_ADV_FLAG_SEC_CODED;
8403 }
8404
8405 return flags;
8406 }
8407
/* Handler for MGMT_OP_READ_ADV_FEATURES: report supported advertising
 * flags, data-length limits, and the list of externally visible
 * advertising instance identifiers.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One trailing byte per advertising instance */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = max_adv_len(hdev);
	rp->max_scan_rsp_len = max_adv_len(hdev);
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			/* Hidden instance: shrink the reported list */
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
8462
/* Return the EIR-encoded size of the local name (length + type + name
 * bytes), by encoding it into a scratch buffer.
 */
static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */

	return eir_append_local_name(hdev, buf, 0);
}
8469
tlv_data_max_len(struct hci_dev * hdev,u32 adv_flags,bool is_adv_data)8470 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8471 bool is_adv_data)
8472 {
8473 u8 max_len = max_adv_len(hdev);
8474
8475 if (is_adv_data) {
8476 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8477 MGMT_ADV_FLAG_LIMITED_DISCOV |
8478 MGMT_ADV_FLAG_MANAGED_FLAGS))
8479 max_len -= 3;
8480
8481 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8482 max_len -= 3;
8483 } else {
8484 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8485 max_len -= calculate_name_len(hdev);
8486
8487 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8488 max_len -= 4;
8489 }
8490
8491 return max_len;
8492 }
8493
flags_managed(u32 adv_flags)8494 static bool flags_managed(u32 adv_flags)
8495 {
8496 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8497 MGMT_ADV_FLAG_LIMITED_DISCOV |
8498 MGMT_ADV_FLAG_MANAGED_FLAGS);
8499 }
8500
tx_power_managed(u32 adv_flags)8501 static bool tx_power_managed(u32 adv_flags)
8502 {
8503 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8504 }
8505
name_managed(u32 adv_flags)8506 static bool name_managed(u32 adv_flags)
8507 {
8508 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8509 }
8510
appearance_managed(u32 adv_flags)8511 static bool appearance_managed(u32 adv_flags)
8512 {
8513 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8514 }
8515
/* Validate user-supplied advertising/scan-response TLV data: it must fit
 * in the available space, be well-formed length-type-value fields, and
 * not contain any field type the kernel manages itself.
 */
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		/* A zero length ends this field; skip to the next byte */
		if (!cur_len)
			continue;

		/* Flags are forbidden in adv data when kernel-managed, and
		 * always forbidden in scan response data.
		 */
		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;

		/* If the current field length would exceed the total data
		 * length, then it's invalid.
		 */
		if (i + cur_len >= len)
			return false;
	}

	return true;
}
8560
requested_adv_flags_are_valid(struct hci_dev * hdev,u32 adv_flags)8561 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8562 {
8563 u32 supported_flags, phy_flags;
8564
8565 /* The current implementation only supports a subset of the specified
8566 * flags. Also need to check mutual exclusiveness of sec flags.
8567 */
8568 supported_flags = get_supported_adv_flags(hdev);
8569 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8570 if (adv_flags & ~supported_flags ||
8571 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8572 return false;
8573
8574 return true;
8575 }
8576
adv_busy(struct hci_dev * hdev)8577 static bool adv_busy(struct hci_dev *hdev)
8578 {
8579 return pending_find(MGMT_OP_SET_LE, hdev);
8580 }
8581
/* Finalize pending advertising instances once an add operation has
 * concluded.  On success every pending instance is committed; on failure
 * every still-pending instance is removed again and userspace notified,
 * cancelling the advertising timeout when the instance being removed is
 * the one currently advertised.
 *
 * Note: the cleanup deliberately walks all pending instances, not just
 * @instance from the originating command.  The inner loop variable was
 * renamed so it no longer shadows the @instance parameter.
 */
static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
			     int err)
{
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 cur_instance;

		if (!adv->pending)
			continue;

		if (!err) {
			adv->pending = false;
			continue;
		}

		cur_instance = adv->instance;

		if (hdev->cur_adv_instance == cur_instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, cur_instance);
		mgmt_advertising_removed(sk, hdev, cur_instance);
	}

	hci_dev_unlock(hdev);
}
8613
add_advertising_complete(struct hci_dev * hdev,void * data,int err)8614 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8615 {
8616 struct mgmt_pending_cmd *cmd = data;
8617 struct mgmt_cp_add_advertising *cp = cmd->param;
8618 struct mgmt_rp_add_advertising rp;
8619
8620 memset(&rp, 0, sizeof(rp));
8621
8622 rp.instance = cp->instance;
8623
8624 if (err)
8625 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8626 mgmt_status(err));
8627 else
8628 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8629 mgmt_status(err), &rp, sizeof(rp));
8630
8631 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8632
8633 mgmt_pending_free(cmd);
8634 }
8635
add_advertising_sync(struct hci_dev * hdev,void * data)8636 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8637 {
8638 struct mgmt_pending_cmd *cmd = data;
8639 struct mgmt_cp_add_advertising *cp = cmd->param;
8640
8641 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8642 }
8643
/* MGMT_OP_ADD_ADVERTISING handler: register (or overwrite) advertising
 * instance @cp->instance with the supplied flags, TLV data, timeout and
 * duration, and schedule it for transmission when possible.
 *
 * The reply is sent synchronously for rejections and for the no-HCI-work
 * case; otherwise it comes from add_advertising_complete() once the
 * queued work has run.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance numbers are 1-based and bounded by the controller. */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable-length payload must exactly match the declared
	 * adv data and scan response lengths.
	 */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a powered controller to be honoured. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Scan response data follows the adv data in cp->data. */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* The sync work operates on the instance actually scheduled, which
	 * may differ from the one just added (see above).
	 */
	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8778
/* Completion callback for the Add Extended Advertising Parameters
 * command.  On success the reply reports the instance's TX power and the
 * remaining space for adv/scan-rsp data; on failure the just-created
 * instance is removed again (with an Advertising Removed event if it had
 * already been advertising).
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance may have been removed while the work was queued. */
	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
8828
add_ext_adv_params_sync(struct hci_dev * hdev,void * data)8829 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8830 {
8831 struct mgmt_pending_cmd *cmd = data;
8832 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8833
8834 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8835 }
8836
/* MGMT_OP_ADD_EXT_ADV_PARAMS handler: first half of the two-step
 * extended advertising interface.  Creates an instance with the given
 * parameters but no data; the data follows via Add Ext Adv Data.
 *
 * For ext-adv capable controllers the parameter HCI command is queued
 * and the reply comes from add_ext_adv_params_complete(); otherwise the
 * reply is sent immediately.
 */
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	/* Instance numbers are 1-based and bounded by the controller. */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			/* Roll back the instance created above. */
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		/* Legacy advertising: no parameter command is sent now, so
		 * reply immediately with defaults and the available space.
		 */
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8952
add_ext_adv_data_complete(struct hci_dev * hdev,void * data,int err)8953 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8954 {
8955 struct mgmt_pending_cmd *cmd = data;
8956 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8957 struct mgmt_rp_add_advertising rp;
8958
8959 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8960
8961 memset(&rp, 0, sizeof(rp));
8962
8963 rp.instance = cp->instance;
8964
8965 if (err)
8966 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8967 mgmt_status(err));
8968 else
8969 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8970 mgmt_status(err), &rp, sizeof(rp));
8971
8972 mgmt_pending_free(cmd);
8973 }
8974
add_ext_adv_data_sync(struct hci_dev * hdev,void * data)8975 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8976 {
8977 struct mgmt_pending_cmd *cmd = data;
8978 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8979 int err;
8980
8981 if (ext_adv_capable(hdev)) {
8982 err = hci_update_adv_data_sync(hdev, cp->instance);
8983 if (err)
8984 return err;
8985
8986 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8987 if (err)
8988 return err;
8989
8990 return hci_enable_ext_advertising_sync(hdev, cp->instance);
8991 }
8992
8993 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8994 }
8995
/* MGMT_OP_ADD_EXT_ADV_DATA handler: second half of the two-step
 * extended advertising interface.  Attaches adv/scan-rsp TLV data to an
 * instance previously created by Add Ext Adv Params and (re)schedules
 * it.  On validation failure after the instance lookup, the instance is
 * removed again (clear_new_instance).
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance must have been created via Add Ext Adv Params. */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9114
remove_advertising_complete(struct hci_dev * hdev,void * data,int err)9115 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9116 int err)
9117 {
9118 struct mgmt_pending_cmd *cmd = data;
9119 struct mgmt_cp_remove_advertising *cp = cmd->param;
9120 struct mgmt_rp_remove_advertising rp;
9121
9122 bt_dev_dbg(hdev, "err %d", err);
9123
9124 memset(&rp, 0, sizeof(rp));
9125 rp.instance = cp->instance;
9126
9127 if (err)
9128 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9129 mgmt_status(err));
9130 else
9131 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9132 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9133
9134 mgmt_pending_free(cmd);
9135 }
9136
remove_advertising_sync(struct hci_dev * hdev,void * data)9137 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9138 {
9139 struct mgmt_pending_cmd *cmd = data;
9140 struct mgmt_cp_remove_advertising *cp = cmd->param;
9141 int err;
9142
9143 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9144 if (err)
9145 return err;
9146
9147 if (list_empty(&hdev->adv_instances))
9148 err = hci_disable_advertising_sync(hdev);
9149
9150 return err;
9151 }
9152
/* MGMT_OP_REMOVE_ADVERTISING handler: remove advertising instance
 * @cp->instance (only non-zero instances are validated against the
 * registered list; presumably 0 means "all instances" — confirm against
 * the mgmt-api spec).  The HCI work is queued via
 * remove_advertising_sync(); the reply comes from
 * remove_advertising_complete().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* A non-zero instance must refer to an existing instance. */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* A pending Set LE command is changing the advertising state. */
	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to remove if no instances are registered at all. */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9200
/* MGMT_OP_GET_ADV_SIZE_INFO handler: report how many octets of adv and
 * scan response data are available for an instance, given the requested
 * flags (kernel-managed fields reduce the available space).
 */
static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	/* Instance numbers are 1-based and bounded by the controller. */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	/* The current implementation only supports a subset of the
	 * specified flags.
	 */
	if (flags & ~get_supported_adv_flags(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}
9236
/* Dispatch table for MGMT opcodes, indexed by opcode value (0x0000 is
 * unused).  Each entry gives the handler, the expected parameter size
 * (a minimum when HCI_MGMT_VAR_LEN is set) and behavioural flags such
 * as HCI_MGMT_NO_HDEV, HCI_MGMT_UNTRUSTED, HCI_MGMT_UNCONFIGURED and
 * HCI_MGMT_HDEV_OPTIONAL.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
};
9370
mgmt_index_added(struct hci_dev * hdev)9371 void mgmt_index_added(struct hci_dev *hdev)
9372 {
9373 struct mgmt_ev_ext_index ev;
9374
9375 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9376 return;
9377
9378 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9379 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
9380 HCI_MGMT_UNCONF_INDEX_EVENTS);
9381 ev.type = 0x01;
9382 } else {
9383 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9384 HCI_MGMT_INDEX_EVENTS);
9385 ev.type = 0x00;
9386 }
9387
9388 ev.bus = hdev->bus;
9389
9390 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9391 HCI_MGMT_EXT_INDEX_EVENTS);
9392 }
9393
mgmt_index_removed(struct hci_dev * hdev)9394 void mgmt_index_removed(struct hci_dev *hdev)
9395 {
9396 struct mgmt_ev_ext_index ev;
9397 u8 status = MGMT_STATUS_INVALID_INDEX;
9398
9399 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9400 return;
9401
9402 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9403
9404 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9405 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
9406 HCI_MGMT_UNCONF_INDEX_EVENTS);
9407 ev.type = 0x01;
9408 } else {
9409 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9410 HCI_MGMT_INDEX_EVENTS);
9411 ev.type = 0x00;
9412 }
9413
9414 ev.bus = hdev->bus;
9415
9416 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9417 HCI_MGMT_EXT_INDEX_EVENTS);
9418
9419 /* Cancel any remaining timed work */
9420 if (!hci_dev_test_flag(hdev, HCI_MGMT))
9421 return;
9422 cancel_delayed_work_sync(&hdev->discov_off);
9423 cancel_delayed_work_sync(&hdev->service_cache);
9424 cancel_delayed_work_sync(&hdev->rpa_expired);
9425 }
9426
/* Called when a power-on attempt finishes (@err == 0 on success).
 * On success, restarts stored LE actions and passive scanning, then
 * completes any pending Set Powered commands and emits New Settings.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	/* Answer every pending Set Powered command. */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* Drop the socket reference recorded in match (if any). */
	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
9449
/* Power-off counterpart of mgmt_power_on(): completes pending Set
 * Powered commands, fails all other pending commands, announces a
 * cleared class of device (if it was non-zero) and emits New Settings.
 * Note: unlike mgmt_power_on() no hci_dev_lock() is taken here —
 * presumably the caller holds it; verify at the call sites.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Only announce the class-of-device reset if it was non-zero. */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	/* Drop the socket reference recorded in match (if any). */
	if (match.sk)
		sock_put(match.sk);
}
9483
mgmt_set_powered_failed(struct hci_dev * hdev,int err)9484 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9485 {
9486 struct mgmt_pending_cmd *cmd;
9487 u8 status;
9488
9489 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9490 if (!cmd)
9491 return;
9492
9493 if (err == -ERFKILL)
9494 status = MGMT_STATUS_RFKILLED;
9495 else
9496 status = MGMT_STATUS_FAILED;
9497
9498 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9499
9500 mgmt_pending_remove(cmd);
9501 }
9502
/* Broadcast a New Link Key event for a BR/EDR key; store_hint tells
 * userspace whether to persist it.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	/* memset also clears any struct padding before the event is
	 * copied to userspace.
	 */
	memset(&ev, 0, sizeof(ev));

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.pin_len = key->pin_len;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.store_hint = persistent ? 0x01 : 0x00;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
9519
mgmt_ltk_type(struct smp_ltk * ltk)9520 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9521 {
9522 switch (ltk->type) {
9523 case SMP_LTK:
9524 case SMP_LTK_RESPONDER:
9525 if (ltk->authenticated)
9526 return MGMT_LTK_AUTHENTICATED;
9527 return MGMT_LTK_UNAUTHENTICATED;
9528 case SMP_LTK_P256:
9529 if (ltk->authenticated)
9530 return MGMT_LTK_P256_AUTH;
9531 return MGMT_LTK_P256_UNAUTH;
9532 case SMP_LTK_P256_DEBUG:
9533 return MGMT_LTK_P256_DEBUG;
9534 }
9535
9536 return MGMT_LTK_UNAUTHENTICATED;
9537 }
9538
mgmt_new_ltk(struct hci_dev * hdev,struct smp_ltk * key,bool persistent)9539 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
9540 {
9541 struct mgmt_ev_new_long_term_key ev;
9542
9543 memset(&ev, 0, sizeof(ev));
9544
9545 /* Devices using resolvable or non-resolvable random addresses
9546 * without providing an identity resolving key don't require
9547 * to store long term keys. Their addresses will change the
9548 * next time around.
9549 *
9550 * Only when a remote device provides an identity address
9551 * make sure the long term key is stored. If the remote
9552 * identity is known, the long term keys are internally
9553 * mapped to the identity address. So allow static random
9554 * and public addresses here.
9555 */
9556 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9557 (key->bdaddr.b[5] & 0xc0) != 0xc0)
9558 ev.store_hint = 0x00;
9559 else
9560 ev.store_hint = persistent;
9561
9562 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9563 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
9564 ev.key.type = mgmt_ltk_type(key);
9565 ev.key.enc_size = key->enc_size;
9566 ev.key.ediv = key->ediv;
9567 ev.key.rand = key->rand;
9568
9569 if (key->type == SMP_LTK)
9570 ev.key.initiator = 1;
9571
9572 /* Make sure we copy only the significant bytes based on the
9573 * encryption key size, and set the rest of the value to zeroes.
9574 */
9575 memcpy(ev.key.val, key->val, key->enc_size);
9576 memset(ev.key.val + key->enc_size, 0,
9577 sizeof(ev.key.val) - key->enc_size);
9578
9579 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
9580 }
9581
/* Broadcast a New IRK event carrying the peer's identity resolving key
 * and its current resolvable private address.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
	ev.store_hint = persistent ? 0x01 : 0x00;

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
9597
mgmt_new_csrk(struct hci_dev * hdev,struct smp_csrk * csrk,bool persistent)9598 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9599 bool persistent)
9600 {
9601 struct mgmt_ev_new_csrk ev;
9602
9603 memset(&ev, 0, sizeof(ev));
9604
9605 /* Devices using resolvable or non-resolvable random addresses
9606 * without providing an identity resolving key don't require
9607 * to store signature resolving keys. Their addresses will change
9608 * the next time around.
9609 *
9610 * Only when a remote device provides an identity address
9611 * make sure the signature resolving key is stored. So allow
9612 * static random and public addresses here.
9613 */
9614 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9615 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9616 ev.store_hint = 0x00;
9617 else
9618 ev.store_hint = persistent;
9619
9620 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9621 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
9622 ev.key.type = csrk->type;
9623 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9624
9625 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9626 }
9627
/* Broadcast updated LE connection parameters for an identity address;
 * non-identity addresses are silently ignored.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = store_hint;
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.timeout = cpu_to_le16(timeout);
	ev.latency = cpu_to_le16(latency);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.min_interval = cpu_to_le16(min_interval);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
9648
/* Broadcast a Device Connected event (at most once per connection).
 * The event carries either the connection's LE advertising data or,
 * for BR/EDR, the remote name and class of device as EIR fields.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	/* Only notify userspace once per connection */
	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* allocate buff for LE or BR/EDR adv */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	/* mgmt_alloc_skb() can fail under memory pressure; bail out
	 * instead of letting skb_put() dereference a NULL pointer.
	 */
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);

		/* Only append the class of device when it is non-zero */
		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class, sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}
9698
unpair_device_rsp(struct mgmt_pending_cmd * cmd,void * data)9699 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9700 {
9701 struct hci_dev *hdev = data;
9702 struct mgmt_cp_unpair_device *cp = cmd->param;
9703
9704 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9705
9706 cmd->cmd_complete(cmd, 0);
9707 mgmt_pending_remove(cmd);
9708 }
9709
mgmt_powering_down(struct hci_dev * hdev)9710 bool mgmt_powering_down(struct hci_dev *hdev)
9711 {
9712 struct mgmt_pending_cmd *cmd;
9713 struct mgmt_mode *cp;
9714
9715 if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
9716 return true;
9717
9718 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9719 if (!cmd)
9720 return false;
9721
9722 cp = cmd->param;
9723 if (!cp->val)
9724 return true;
9725
9726 return false;
9727 }
9728
/* Broadcast a Device Disconnected event for an ACL or LE link that was
 * previously reported as connected to MGMT listeners.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;	/* never assigned below; vestigial skip-socket */

	/* Only devices announced via Device Connected get this event */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	/* sk is always NULL here, so this branch never runs */
	if (sk)
		sock_put(sk);
}
9755
/* Handle a failed HCI disconnect: flush any pending Unpair Device
 * commands and complete a matching pending Disconnect command.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_disconnect *cp;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only complete the command when it targets this exact address */
	if (!bacmp(bdaddr, &cp->addr.bdaddr) && cp->addr.type == bdaddr_type) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}
}
9781
/* Report a failed connection attempt. If MGMT was already told about
 * the connection, a disconnect is reported instead.
 */
void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
					 conn->dst_type, status, true);
		return;
	}

	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = mgmt_status(status);
	bacpy(&ev.addr.bdaddr, &conn->dst);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
9798
/* Broadcast a PIN Code Request for a BR/EDR peer; 'secure' indicates a
 * 16-digit PIN is required.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	ev.secure = secure;
	ev.addr.type = BDADDR_BREDR;
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
9809
/* Complete a pending PIN Code Reply command with the mapped status. */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd = pending_find(MGMT_OP_PIN_CODE_REPLY,
						    hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}
}
9822
/* Complete a pending PIN Code Negative Reply command. */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY,
						    hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}
}
9835
/* Broadcast a User Confirm Request event so an agent can confirm the
 * displayed numeric value.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	ev.value = cpu_to_le32(value);
	ev.confirm_hint = confirm_hint;
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9852
/* Broadcast a User Passkey Request event for the given remote address. */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9866
/* Shared completion path for the four user pairing reply opcodes.
 * Returns -ENOENT when no matching command is pending.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd = pending_find(opcode, hdev);

	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
	return 0;
}
9882
/* Complete a pending positive User Confirm reply. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9889
/* Complete a pending negative User Confirm reply. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9897
/* Complete a pending positive User Passkey reply. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9904
/* Complete a pending negative User Passkey reply. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9912
/* Broadcast a Passkey Notify event so agents can display the passkey;
 * 'entered' is the number of digits the remote side has keyed in.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	ev.entered = entered;
	ev.passkey = __cpu_to_le32(passkey);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
9928
/* Report an authentication failure: broadcast Auth Failed (skipping the
 * initiator's socket, which gets a command response) and complete any
 * pending pairing command for this connection.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Emit the event before completing the command so the initiator
	 * never sees both.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9949
/* Completion handler for toggling link-level authentication: sync the
 * HCI_LINK_SECURITY flag with the controller's HCI_AUTH state and
 * answer pending Set Link Security commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		/* Fail all pending Set Link Security commands */
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* 'changed' is true only if the flag actually flipped */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9976
sk_lookup(struct mgmt_pending_cmd * cmd,void * data)9977 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9978 {
9979 struct cmd_lookup *match = data;
9980
9981 if (match->sk == NULL) {
9982 match->sk = cmd->sk;
9983 sock_hold(match->sk);
9984 }
9985 }
9986
/* Called when a class-of-device update completes. On success the new
 * class is broadcast to listeners interested in class events.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Grab (and hold) the first requester's socket.
	 * NOTE(review): the broadcast below passes NULL as skip socket,
	 * so match.sk is only ever released — confirm this is intended.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		/* Class of Device is a fixed 3-byte field */
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
10005
/* Called when setting the local name completes. Broadcasts the new
 * name unless the change was part of the power-on/off sequence.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending command: the controller initiated the change,
		 * so cache the name locally.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		/* NOTE(review): the flag tested is HCI_POWERING_DOWN even
		 * though the comment above says "powering on" — confirm.
		 */
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
			return;

		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
10036
/* Return true if the 128-bit 'uuid' occurs in the 'uuids' array. */
static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	u16 idx;

	for (idx = 0; idx < uuid_count; idx++)
		if (memcmp(uuid, uuids[idx], 16) == 0)
			return true;

	return false;
}
10048
/* Walk the EIR/advertising data and return true if any advertised
 * 16/32/128-bit UUID matches an entry in 'uuids'. 16- and 32-bit UUIDs
 * are expanded into full 128-bit form using the Bluetooth base UUID.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];	/* length excludes this byte itself */
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		/* Stop if the field would run past the buffer; this also
		 * guarantees eir[1] (the AD type) is in bounds below.
		 */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* 16-bit UUIDs: little-endian pairs after the type */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 32-bit UUIDs: little-endian quads after the type */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* 128-bit UUIDs are copied verbatim */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance to the next AD structure */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
10103
/* Apply the service-discovery result filter (RSSI threshold and UUID
 * list) to a found device; returns false when the result should be
 * dropped.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
10146
/* Broadcast that a device tracked by the given Advertisement Monitor
 * is no longer being seen.
 */
void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;
	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}
10159
/* Emit an ADV_MONITOR_DEVICE_FOUND event built from an existing
 * DEVICE_FOUND skb plus the matched monitor handle. The input 'skb' is
 * only read, never consumed — the caller retains ownership.
 */
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	/* Size of DEVICE_FOUND payload plus the extra monitor_handle field */
	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}
10189
/* Route a DEVICE_FOUND skb to the right listeners: broadcast it as
 * DEVICE_FOUND, and/or wrap it for matched Advertisement Monitors.
 * The skb is always consumed (forwarded or freed) on every path.
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	/* Recomputed below: set again if any monitored device still
	 * awaits its one-shot notification.
	 */
	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			/* Notify each monitored device only once */
			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	/* Consume the skb: broadcast it or drop it */
	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
10253
/* Emit a Mesh Device Found event if the advertisement (or its scan
 * response) contains at least one of the configured mesh AD types. An
 * empty mesh_ad_types list accepts everything.
 */
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	/* An unset first entry means no AD-type filter is configured */
	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		/* Walk AD structures: eir[i] is the length, eir[i+1] the type */
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				/* Zero terminates the configured type list */
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	/* No requested AD type present: drop silently */
	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
10319
/* Main device-found entry point: apply mesh, discovery and filter
 * policies, build the DEVICE_FOUND event and hand it to the
 * Advertisement Monitor dispatch for delivery.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	/* LE reports are additionally fanned out to mesh listeners */
	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Synthesize a CoD EIR field when the EIR lacks one */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	/* Hands off the skb; it is consumed on all paths */
	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
10411
/* Report the outcome of a remote name request as a DEVICE_FOUND event:
 * the name as an EIR field on success, or NAME_REQUEST_FAILED in flags.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));

	/* mgmt_alloc_skb() can fail under memory pressure; bail out
	 * instead of letting skb_put() dereference a NULL pointer.
	 */
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}
10438
/* Notify management sockets that discovery started or stopped.
 *
 * @hdev:        controller whose discovery state changed
 * @discovering: non-zero when discovery is now active
 *
 * The event carries the discovery type currently configured on @hdev.
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev = {
		.type = hdev->discovery.type,
		.discovering = discovering,
	};

	bt_dev_dbg(hdev, "discovering %u", discovering);

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
10451
/* Send MGMT_EV_CONTROLLER_SUSPEND with the given suspend state.
 *
 * @hdev:  controller entering suspend
 * @state: suspend state value reported to user space
 */
void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev = {
		.suspend_state = state,
	};

	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}
10459
/* Send MGMT_EV_CONTROLLER_RESUME describing why the controller woke up.
 *
 * @hdev:      controller that resumed
 * @reason:    wake reason code placed in the event
 * @bdaddr:    address of the waking device, or NULL if not applicable
 * @addr_type: address type of @bdaddr (ignored when @bdaddr is NULL)
 *
 * When no device caused the wake, the address field is reported as all
 * zeroes.
 */
void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	/* Start from a zeroed address; it is fully overwritten below when a
	 * wake device is known.
	 */
	memset(&ev.addr, 0, sizeof(ev.addr));

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}
10475
/* Registration record for the HCI control channel: dispatches management
 * commands arriving on HCI_CHANNEL_CONTROL sockets to mgmt_handlers and
 * runs mgmt_init_hdev when a controller is first accessed.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
10482
/* Register the management control channel with the HCI socket layer.
 * Returns 0 on success or a negative errno from hci_mgmt_chan_register().
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
10487
/* Unregister the management control channel on subsystem shutdown. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
10492
mgmt_cleanup(struct sock * sk)10493 void mgmt_cleanup(struct sock *sk)
10494 {
10495 struct mgmt_mesh_tx *mesh_tx;
10496 struct hci_dev *hdev;
10497
10498 read_lock(&hci_dev_list_lock);
10499
10500 list_for_each_entry(hdev, &hci_dev_list, list) {
10501 do {
10502 mesh_tx = mgmt_mesh_next(hdev, sk);
10503
10504 if (mesh_tx)
10505 mesh_send_complete(hdev, mesh_tx, true);
10506 } while (mesh_tx);
10507 }
10508
10509 read_unlock(&hci_dev_list_lock);
10510 }
10511