/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <linux/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
#include "aosp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	23

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
	MGMT_OP_HCI_CMD_SYNC,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};

static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};

#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

static u8 mgmt_errno_status(int err)
{
	switch (err) {
	case 0:
		return MGMT_STATUS_SUCCESS;
	case -EPERM:
		return MGMT_STATUS_REJECTED;
	case -EINVAL:
		return MGMT_STATUS_INVALID_PARAMS;
	case -EOPNOTSUPP:
		return MGMT_STATUS_NOT_SUPPORTED;
	case -EBUSY:
		return MGMT_STATUS_BUSY;
	case -ETIMEDOUT:
		return MGMT_STATUS_AUTH_FAILED;
	case -ENOMEM:
		return MGMT_STATUS_NO_RESOURCES;
	case -EISCONN:
		return MGMT_STATUS_ALREADY_CONNECTED;
	case -ENOTCONN:
		return MGMT_STATUS_DISCONNECTED;
	}

	return MGMT_STATUS_FAILED;
}

static u8 mgmt_status(int err)
{
	if (err < 0)
		return mgmt_errno_status(err);

	if (err < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[err];

	return MGMT_STATUS_FAILED;
}
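
/* Illustrative examples of the conversion above (annotation, not part
 * of the original file): negative errno values go through the switch
 * in mgmt_errno_status(), while non-negative HCI status codes index
 * mgmt_status_table directly:
 *
 *	mgmt_status(-EBUSY)	-> MGMT_STATUS_BUSY
 *	mgmt_status(0x05)	-> MGMT_STATUS_AUTH_FAILED ("Authentication Failed")
 *	mgmt_status(0xff)	-> MGMT_STATUS_FAILED (beyond the table)
 */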

static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}

static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;
	else
		return ADDR_LE_DEV_RANDOM;
}

void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
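
/* Illustrative userspace sketch (an assumption about the caller side,
 * not part of this file): commands like Read Version arrive over an
 * HCI control-channel socket.  Every MGMT PDU starts with a 6-byte
 * little-endian header: opcode (__le16), controller index (__le16)
 * and parameter length (__le16).
 *
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = HCI_DEV_NONE,
 *		.hci_channel = HCI_CHANNEL_CONTROL,
 *	};
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 *	// MGMT_OP_READ_VERSION (0x0001), index MGMT_INDEX_NONE (0xffff),
 *	// zero parameter bytes
 *	uint8_t pkt[6] = { 0x01, 0x00, 0xff, 0xff, 0x00, 0x00 };
 *	write(fd, pkt, sizeof(pkt));
 */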

static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}

static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
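
/* A note on the two-pass pattern above: the first walk over
 * hci_dev_list only establishes an upper bound for the allocation,
 * while the second walk applies the full filter set (HCI_SETUP,
 * HCI_CONFIG, HCI_USER_CHANNEL and raw-only devices) and may fill in
 * fewer entries than were allocated.  That is why num_controllers and
 * rp_len are recomputed from the final count before the reply is
 * sent.  The same pattern is used by the unconfigured and extended
 * index list handlers below.
 */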

static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
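
/* A note on the sizing above: struct_size(rp, entry, count) is the
 * overflow-checked equivalent of
 *
 *	sizeof(*rp) + count * sizeof(rp->entry[0])
 *
 * It is used both for the allocation and for the final reply length,
 * where the final count may be smaller since the second walk filters
 * out controllers in setup, config, user-channel or raw-only state.
 * Entry types in the reply are 0x00 for a configured and 0x01 for an
 * unconfigured controller, as set in the loop above.
 */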

static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}

static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}

static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}

static u32 get_configurable_phys(struct hci_dev *hdev)
{
	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}
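
/* Rationale for the mask above: BR 1M 1-slot and LE 1M TX/RX are the
 * baseline PHYs that get_supported_phys() reports unconditionally on
 * capable controllers, so they are excluded from the configurable
 * set; only the optional PHYs can be toggled.
 */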

static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev))
			settings |= MGMT_SETTING_SSP;

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means the flag is never set while the static address is
	 * not configured. Once the address is configured, whether it is
	 * actually in use decides if the flag is set.
	 *
	 * For single-mode LE-only controllers, and for dual-mode
	 * controllers with BR/EDR disabled, the existence of the static
	 * address is evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	return settings;
}
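
/* The bitmask built above is the same payload the New Settings event
 * carries: new_settings() further down sends get_current_settings()
 * as a little-endian 32-bit value.  A hypothetical userspace decode
 * (names from mgmt.h; sketch only):
 *
 *	u32 settings = le32_to_cpu(ev);
 *	bool powered = settings & MGMT_SETTING_POWERED;
 *	bool le_on   = settings & MGMT_SETTING_LE;
 *
 * Each MGMT_SETTING_* constant is a single bit, so supported and
 * current settings can be tested independently.
 */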

static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}

bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}

static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}

static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
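
/* HCI_SERVICE_CACHE debounces EIR and class-of-device updates: the
 * delayed work above only flushes them if the flag is still set when
 * it fires.  The work is armed with CACHE_TIMEOUT (two seconds) from
 * the UUID and class update paths; that arming is outside this
 * excerpt, so treat the exact trigger points as an assumption.
 */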

static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable-advertising path invoked
	 * below.
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}

static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data);

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);

static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}

static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}

static int mesh_send_sync(struct hci_dev *hdev, void *data);
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);

static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}

static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}

static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set implicitly
	 * so that pairing works for them. For mgmt, however, we
	 * require user-space to enable it explicitly.
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}

static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}

static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}

static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}

void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for the AUTO_OFF case where the controller
		 * might not "really" have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}

static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_settings for power on, as power off is
		 * deferred to hdev->power_off work, which does call
		 * hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}

static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return -ECANCELED;

	cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}

static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!cp->val) {
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
					      MGMT_STATUS_BUSY);
			goto failed;
		}
	}

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
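
/* Note on the split above, restating the in-line comments: power-off
 * first cancels any blocking operation and then queues onto the
 * already-running cmd_sync machinery with hci_cmd_sync_queue(), while
 * power-on uses hci_cmd_sync_submit() because hdev might not be
 * running yet.  This reading is inferred from the comments here, not
 * from the hci_sync implementation itself.
 */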

int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}

static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}

static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	/* dequeue cmd_sync entries using cmd as data as that is about to be
	 * removed/freed.
	 */
	hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);

	if (cmd->cmd_complete) {
		cmd->cmd_complete(cmd, match->mgmt_status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}

static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}

static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);

		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
1554
set_discoverable(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)1555 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1556 u16 len)
1557 {
1558 struct mgmt_cp_set_discoverable *cp = data;
1559 struct mgmt_pending_cmd *cmd;
1560 u16 timeout;
1561 int err;
1562
1563 bt_dev_dbg(hdev, "sock %p", sk);
1564
1565 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1566 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1567 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1568 MGMT_STATUS_REJECTED);
1569
1570 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1571 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1572 MGMT_STATUS_INVALID_PARAMS);
1573
1574 timeout = __le16_to_cpu(cp->timeout);
1575
1576 /* Disabling discoverable requires that no timeout is set,
1577 * and enabling limited discoverable requires a timeout.
1578 */
1579 if ((cp->val == 0x00 && timeout > 0) ||
1580 (cp->val == 0x02 && timeout == 0))
1581 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1582 MGMT_STATUS_INVALID_PARAMS);
1583
1584 hci_dev_lock(hdev);
1585
1586 if (!hdev_is_powered(hdev) && timeout > 0) {
1587 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1588 MGMT_STATUS_NOT_POWERED);
1589 goto failed;
1590 }
1591
1592 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1593 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1594 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1595 MGMT_STATUS_BUSY);
1596 goto failed;
1597 }
1598
1599 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1600 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1601 MGMT_STATUS_REJECTED);
1602 goto failed;
1603 }
1604
1605 if (hdev->advertising_paused) {
1606 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1607 MGMT_STATUS_BUSY);
1608 goto failed;
1609 }
1610
1611 if (!hdev_is_powered(hdev)) {
1612 bool changed = false;
1613
1614 /* Setting limited discoverable when powered off is
1615 * not a valid operation since it requires a timeout
1616 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1617 */
1618 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1619 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1620 changed = true;
1621 }
1622
1623 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1624 if (err < 0)
1625 goto failed;
1626
1627 if (changed)
1628 err = new_settings(hdev, sk);
1629
1630 goto failed;
1631 }
1632
1633 /* If the current mode is the same, then just update the timeout
1634 * value with the new value. And if only the timeout gets updated,
1635 * then no need for any HCI transactions.
1636 */
1637 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1638 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1639 HCI_LIMITED_DISCOVERABLE)) {
1640 cancel_delayed_work(&hdev->discov_off);
1641 hdev->discov_timeout = timeout;
1642
1643 if (cp->val && hdev->discov_timeout > 0) {
1644 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1645 queue_delayed_work(hdev->req_workqueue,
1646 &hdev->discov_off, to);
1647 }
1648
1649 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1650 goto failed;
1651 }
1652
1653 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1654 if (!cmd) {
1655 err = -ENOMEM;
1656 goto failed;
1657 }
1658
1659 /* Cancel any potential discoverable timeout that might be
1660 * still active and store new timeout value. The arming of
1661 * the timeout happens in the complete handler.
1662 */
1663 cancel_delayed_work(&hdev->discov_off);
1664 hdev->discov_timeout = timeout;
1665
1666 if (cp->val)
1667 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1668 else
1669 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1670
1671 /* Limited discoverable mode */
1672 if (cp->val == 0x02)
1673 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1674 else
1675 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1676
1677 err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1678 mgmt_set_discoverable_complete);
1679
1680 if (err < 0)
1681 mgmt_pending_remove(cmd);
1682
1683 failed:
1684 hci_dev_unlock(hdev);
1685 return err;
1686 }
1687
mgmt_set_connectable_complete(struct hci_dev * hdev,void * data,int err)1688 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1689 int err)
1690 {
1691 struct mgmt_pending_cmd *cmd = data;
1692
1693 bt_dev_dbg(hdev, "err %d", err);
1694
1695 /* Make sure cmd still outstanding. */
1696 if (err == -ECANCELED ||
1697 cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1698 return;
1699
1700 hci_dev_lock(hdev);
1701
1702 if (err) {
1703 u8 mgmt_err = mgmt_status(err);
1704 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1705 goto done;
1706 }
1707
1708 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1709 new_settings(hdev, cmd->sk);
1710
1711 done:
1712 mgmt_pending_remove(cmd);
1713
1714 hci_dev_unlock(hdev);
1715 }
1716
set_connectable_update_settings(struct hci_dev * hdev,struct sock * sk,u8 val)1717 static int set_connectable_update_settings(struct hci_dev *hdev,
1718 struct sock *sk, u8 val)
1719 {
1720 bool changed = false;
1721 int err;
1722
1723 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1724 changed = true;
1725
1726 if (val) {
1727 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1728 } else {
1729 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1730 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1731 }
1732
1733 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1734 if (err < 0)
1735 return err;
1736
1737 if (changed) {
1738 hci_update_scan(hdev);
1739 hci_update_passive_scan(hdev);
1740 return new_settings(hdev, sk);
1741 }
1742
1743 return 0;
1744 }
1745
set_connectable_sync(struct hci_dev * hdev,void * data)1746 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1747 {
1748 BT_DBG("%s", hdev->name);
1749
1750 return hci_update_connectable_sync(hdev);
1751 }
1752
set_connectable(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)1753 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1754 u16 len)
1755 {
1756 struct mgmt_mode *cp = data;
1757 struct mgmt_pending_cmd *cmd;
1758 int err;
1759
1760 bt_dev_dbg(hdev, "sock %p", sk);
1761
1762 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1763 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1764 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1765 MGMT_STATUS_REJECTED);
1766
1767 if (cp->val != 0x00 && cp->val != 0x01)
1768 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1769 MGMT_STATUS_INVALID_PARAMS);
1770
1771 hci_dev_lock(hdev);
1772
1773 if (!hdev_is_powered(hdev)) {
1774 err = set_connectable_update_settings(hdev, sk, cp->val);
1775 goto failed;
1776 }
1777
1778 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1779 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1780 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1781 MGMT_STATUS_BUSY);
1782 goto failed;
1783 }
1784
1785 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1786 if (!cmd) {
1787 err = -ENOMEM;
1788 goto failed;
1789 }
1790
1791 if (cp->val) {
1792 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1793 } else {
1794 if (hdev->discov_timeout > 0)
1795 cancel_delayed_work(&hdev->discov_off);
1796
1797 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1798 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1799 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1800 }
1801
1802 err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1803 mgmt_set_connectable_complete);
1804
1805 if (err < 0)
1806 mgmt_pending_remove(cmd);
1807
1808 failed:
1809 hci_dev_unlock(hdev);
1810 return err;
1811 }
1812
set_bondable(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)1813 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1814 u16 len)
1815 {
1816 struct mgmt_mode *cp = data;
1817 bool changed;
1818 int err;
1819
1820 bt_dev_dbg(hdev, "sock %p", sk);
1821
1822 if (cp->val != 0x00 && cp->val != 0x01)
1823 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1824 MGMT_STATUS_INVALID_PARAMS);
1825
1826 hci_dev_lock(hdev);
1827
1828 if (cp->val)
1829 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1830 else
1831 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1832
1833 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1834 if (err < 0)
1835 goto unlock;
1836
1837 if (changed) {
1838 /* In limited privacy mode the change of bondable mode
1839 * may affect the local advertising address.
1840 */
1841 hci_update_discoverable(hdev);
1842
1843 err = new_settings(hdev, sk);
1844 }
1845
1846 unlock:
1847 hci_dev_unlock(hdev);
1848 return err;
1849 }
1850
1851 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1852 u16 len)
1853 {
1854 struct mgmt_mode *cp = data;
1855 struct mgmt_pending_cmd *cmd;
1856 u8 val, status;
1857 int err;
1858
1859 bt_dev_dbg(hdev, "sock %p", sk);
1860
1861 status = mgmt_bredr_support(hdev);
1862 if (status)
1863 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1864 status);
1865
1866 if (cp->val != 0x00 && cp->val != 0x01)
1867 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1868 MGMT_STATUS_INVALID_PARAMS);
1869
1870 hci_dev_lock(hdev);
1871
1872 if (!hdev_is_powered(hdev)) {
1873 bool changed = false;
1874
1875 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1876 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1877 changed = true;
1878 }
1879
1880 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1881 if (err < 0)
1882 goto failed;
1883
1884 if (changed)
1885 err = new_settings(hdev, sk);
1886
1887 goto failed;
1888 }
1889
1890 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1891 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1892 MGMT_STATUS_BUSY);
1893 goto failed;
1894 }
1895
1896 val = !!cp->val;
1897
1898 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1899 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1900 goto failed;
1901 }
1902
1903 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1904 if (!cmd) {
1905 err = -ENOMEM;
1906 goto failed;
1907 }
1908
1909 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1910 if (err < 0) {
1911 mgmt_pending_remove(cmd);
1912 goto failed;
1913 }
1914
1915 failed:
1916 hci_dev_unlock(hdev);
1917 return err;
1918 }
1919
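/* Completion for Set Secure Simple Pairing: on failure, roll back the
 * optimistically set HCI_SSP_ENABLED flag and send a status response
 * to every pending SET_SSP command; on success, update the flag,
 * answer the pending commands and refresh the EIR data.
 */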
1920 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1921 {
1922 struct cmd_lookup match = { NULL, hdev };
1923 struct mgmt_pending_cmd *cmd = data;
1924 struct mgmt_mode *cp = cmd->param;
1925 u8 enable = cp->val;
1926 bool changed;
1927
1928 /* Make sure the cmd is still outstanding. */
1929 if (err == -ECANCELED || cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1930 return;
1931
1932 if (err) {
1933 u8 mgmt_err = mgmt_status(err);
1934
1935 if (enable && hci_dev_test_and_clear_flag(hdev,
1936 HCI_SSP_ENABLED)) {
1937 new_settings(hdev, NULL);
1938 }
1939
1940 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1941 &mgmt_err);
1942 return;
1943 }
1944
1945 if (enable) {
1946 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1947 } else {
1948 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1949 }
1950
1951 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1952
1953 if (changed)
1954 new_settings(hdev, match.sk);
1955
1956 if (match.sk)
1957 sock_put(match.sk);
1958
1959 hci_update_eir_sync(hdev);
1960 }
1961
1962 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1963 {
1964 struct mgmt_pending_cmd *cmd = data;
1965 struct mgmt_mode *cp = cmd->param;
1966 bool changed = false;
1967 int err;
1968
1969 if (cp->val)
1970 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1971
1972 err = hci_write_ssp_mode_sync(hdev, cp->val);
1973
1974 if (!err && changed)
1975 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1976
1977 return err;
1978 }
1979
1980 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1981 {
1982 struct mgmt_mode *cp = data;
1983 struct mgmt_pending_cmd *cmd;
1984 u8 status;
1985 int err;
1986
1987 bt_dev_dbg(hdev, "sock %p", sk);
1988
1989 status = mgmt_bredr_support(hdev);
1990 if (status)
1991 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1992
1993 if (!lmp_ssp_capable(hdev))
1994 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1995 MGMT_STATUS_NOT_SUPPORTED);
1996
1997 if (cp->val != 0x00 && cp->val != 0x01)
1998 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1999 MGMT_STATUS_INVALID_PARAMS);
2000
2001 hci_dev_lock(hdev);
2002
2003 if (!hdev_is_powered(hdev)) {
2004 bool changed;
2005
2006 if (cp->val) {
2007 changed = !hci_dev_test_and_set_flag(hdev,
2008 HCI_SSP_ENABLED);
2009 } else {
2010 changed = hci_dev_test_and_clear_flag(hdev,
2011 HCI_SSP_ENABLED);
2012 }
2013
2014 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2015 if (err < 0)
2016 goto failed;
2017
2018 if (changed)
2019 err = new_settings(hdev, sk);
2020
2021 goto failed;
2022 }
2023
2024 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2025 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2026 MGMT_STATUS_BUSY);
2027 goto failed;
2028 }
2029
2030 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2031 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2032 goto failed;
2033 }
2034
2035 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2036 if (!cmd)
2037 err = -ENOMEM;
2038 else
2039 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2040 set_ssp_complete);
2041
2042 if (err < 0) {
2043 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2044 MGMT_STATUS_FAILED);
2045
2046 if (cmd)
2047 mgmt_pending_remove(cmd);
2048 }
2049
2050 failed:
2051 hci_dev_unlock(hdev);
2052 return err;
2053 }
2054
2055 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2056 {
2057 bt_dev_dbg(hdev, "sock %p", sk);
2058
2059 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2060 MGMT_STATUS_NOT_SUPPORTED);
2061 }
2062
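/* Completion for Set Low Energy: answer all pending SET_LE commands
 * with either an error status or the current settings, and emit a
 * New Settings event on success.
 */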
2063 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2064 {
2065 struct cmd_lookup match = { NULL, hdev };
2066 u8 status = mgmt_status(err);
2067
2068 bt_dev_dbg(hdev, "err %d", err);
2069
2070 if (status) {
2071 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2072 &status);
2073 return;
2074 }
2075
2076 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2077
2078 new_settings(hdev, match.sk);
2079
2080 if (match.sk)
2081 sock_put(match.sk);
2082 }
2083
2084 static int set_le_sync(struct hci_dev *hdev, void *data)
2085 {
2086 struct mgmt_pending_cmd *cmd = data;
2087 struct mgmt_mode *cp = cmd->param;
2088 u8 val = !!cp->val;
2089 int err;
2090
2091 if (!val) {
2092 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2093
2094 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2095 hci_disable_advertising_sync(hdev);
2096
2097 if (ext_adv_capable(hdev))
2098 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2099 } else {
2100 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2101 }
2102
2103 err = hci_write_le_host_supported_sync(hdev, val, 0);
2104
2105 /* Make sure the controller has a good default for
2106 * advertising data. Restrict the update to when LE
2107 * has actually been enabled. During power on, the
2108 * update in powered_update_hci will take care of it.
2109 */
2110 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2111 if (ext_adv_capable(hdev)) {
2112 int status;
2113
2114 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2115 if (!status)
2116 hci_update_scan_rsp_data_sync(hdev, 0x00);
2117 } else {
2118 hci_update_adv_data_sync(hdev, 0x00);
2119 hci_update_scan_rsp_data_sync(hdev, 0x00);
2120 }
2121
2122 hci_update_passive_scan(hdev);
2123 }
2124
2125 return err;
2126 }
2127
2128 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2129 {
2130 struct mgmt_pending_cmd *cmd = data;
2131 u8 status = mgmt_status(err);
2132 struct sock *sk = cmd->sk;
2133
2134 if (status) {
2135 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2136 cmd_status_rsp, &status);
2137 return;
2138 }
2139
2140 mgmt_pending_remove(cmd);
2141 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2142 }
2143
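/* Apply the Set Mesh Receiver parameters: toggle the HCI_MESH flag and
 * store the requested AD type filters. An oversized filter list leaves
 * the table zeroed, which means all advertising packets get forwarded.
 * Passive scanning is re-programmed to match the new state.
 */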
2144 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2145 {
2146 struct mgmt_pending_cmd *cmd = data;
2147 struct mgmt_cp_set_mesh *cp = cmd->param;
2148 size_t len = cmd->param_len;
2149
2150 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2151
2152 if (cp->enable)
2153 hci_dev_set_flag(hdev, HCI_MESH);
2154 else
2155 hci_dev_clear_flag(hdev, HCI_MESH);
2156
2157 len -= sizeof(*cp);
2158
2159 /* If filters don't fit, forward all adv pkts */
2160 if (len <= sizeof(hdev->mesh_ad_types))
2161 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2162
2163 hci_update_passive_scan_sync(hdev);
2164 return 0;
2165 }
2166
2167 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2168 {
2169 struct mgmt_cp_set_mesh *cp = data;
2170 struct mgmt_pending_cmd *cmd;
2171 int err = 0;
2172
2173 bt_dev_dbg(hdev, "sock %p", sk);
2174
2175 if (!lmp_le_capable(hdev) ||
2176 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2177 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2178 MGMT_STATUS_NOT_SUPPORTED);
2179
2180 if (cp->enable != 0x00 && cp->enable != 0x01)
2181 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2182 MGMT_STATUS_INVALID_PARAMS);
2183
2184 hci_dev_lock(hdev);
2185
2186 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2187 if (!cmd)
2188 err = -ENOMEM;
2189 else
2190 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2191 set_mesh_complete);
2192
2193 if (err < 0) {
2194 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2195 MGMT_STATUS_FAILED);
2196
2197 if (cmd)
2198 mgmt_pending_remove(cmd);
2199 }
2200
2201 hci_dev_unlock(hdev);
2202 return err;
2203 }
2204
2205 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2206 {
2207 struct mgmt_mesh_tx *mesh_tx = data;
2208 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2209 unsigned long mesh_send_interval;
2210 u8 mgmt_err = mgmt_status(err);
2211
2212 /* Report any errors here, but don't report completion */
2213
2214 if (mgmt_err) {
2215 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2216 /* Send Complete Error Code for handle */
2217 mesh_send_complete(hdev, mesh_tx, false);
2218 return;
2219 }
2220
2221 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2222 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2223 mesh_send_interval);
2224 }
2225
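/* Transmit a queued mesh packet by creating a short-lived advertising
 * instance (numbered one past the controller's advertising sets) whose
 * duration covers the requested number of transmissions, and schedule
 * it unless another instance is already on the air.
 */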
2226 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2227 {
2228 struct mgmt_mesh_tx *mesh_tx = data;
2229 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2230 struct adv_info *adv, *next_instance;
2231 u8 instance = hdev->le_num_of_adv_sets + 1;
2232 u16 timeout, duration;
2233 int err = 0;
2234
2235 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2236 return MGMT_STATUS_BUSY;
2237
2238 timeout = 1000;
2239 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2240 adv = hci_add_adv_instance(hdev, instance, 0,
2241 send->adv_data_len, send->adv_data,
2242 0, NULL,
2243 timeout, duration,
2244 HCI_ADV_TX_POWER_NO_PREFERENCE,
2245 hdev->le_adv_min_interval,
2246 hdev->le_adv_max_interval,
2247 mesh_tx->handle);
2248
2249 if (!IS_ERR(adv))
2250 mesh_tx->instance = instance;
2251 else
2252 err = PTR_ERR(adv);
2253
2254 if (hdev->cur_adv_instance == instance) {
2255 /* If the currently advertised instance is being changed then
2256 * cancel the current advertising and schedule the next
2257 * instance. If there is only one instance then the overridden
2258 * advertising data will be visible right away.
2259 */
2260 cancel_adv_timeout(hdev);
2261
2262 next_instance = hci_get_next_instance(hdev, instance);
2263 if (next_instance)
2264 instance = next_instance->instance;
2265 else
2266 instance = 0;
2267 } else if (hdev->adv_instance_timeout) {
2268 /* Immediately advertise the new instance if no other is active,
2269 * or let it come up naturally from the queue if advertising is ongoing
2270 */
2271 instance = 0;
2272 }
2273
2274 if (instance)
2275 return hci_schedule_adv_instance_sync(hdev, instance, true);
2276
2277 return err;
2278 }
2279
2280 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2281 {
2282 struct mgmt_rp_mesh_read_features *rp = data;
2283
2284 if (rp->used_handles >= rp->max_handles)
2285 return;
2286
2287 rp->handles[rp->used_handles++] = mesh_tx->handle;
2288 }
2289
2290 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2291 void *data, u16 len)
2292 {
2293 struct mgmt_rp_mesh_read_features rp;
2294
2295 if (!lmp_le_capable(hdev) ||
2296 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2297 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2298 MGMT_STATUS_NOT_SUPPORTED);
2299
2300 memset(&rp, 0, sizeof(rp));
2301 rp.index = cpu_to_le16(hdev->id);
2302 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2303 rp.max_handles = MESH_HANDLES_MAX;
2304
2305 hci_dev_lock(hdev);
2306
2307 if (rp.max_handles)
2308 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2309
2310 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2311 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2312
2313 hci_dev_unlock(hdev);
2314 return 0;
2315 }
2316
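/* Carry out MGMT_OP_MESH_SEND_CANCEL from the cmd_sync queue: a zero
 * handle cancels every outstanding transmission owned by the
 * requesting socket, otherwise only the matching handle is completed.
 */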
2317 static int send_cancel(struct hci_dev *hdev, void *data)
2318 {
2319 struct mgmt_pending_cmd *cmd = data;
2320 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2321 struct mgmt_mesh_tx *mesh_tx;
2322
2323 if (!cancel->handle) {
2324 do {
2325 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2326
2327 if (mesh_tx)
2328 mesh_send_complete(hdev, mesh_tx, false);
2329 } while (mesh_tx);
2330 } else {
2331 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2332
2333 if (mesh_tx && mesh_tx->sk == cmd->sk)
2334 mesh_send_complete(hdev, mesh_tx, false);
2335 }
2336
2337 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2338 0, NULL, 0);
2339 mgmt_pending_free(cmd);
2340
2341 return 0;
2342 }
2343
2344 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2345 void *data, u16 len)
2346 {
2347 struct mgmt_pending_cmd *cmd;
2348 int err;
2349
2350 if (!lmp_le_capable(hdev) ||
2351 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2352 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2353 MGMT_STATUS_NOT_SUPPORTED);
2354
2355 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2356 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2357 MGMT_STATUS_REJECTED);
2358
2359 hci_dev_lock(hdev);
2360 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2361 if (!cmd)
2362 err = -ENOMEM;
2363 else
2364 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2365
2366 if (err < 0) {
2367 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2368 MGMT_STATUS_FAILED);
2369
2370 if (cmd)
2371 mgmt_pending_free(cmd);
2372 }
2373
2374 hci_dev_unlock(hdev);
2375 return err;
2376 }
2377
2378 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2379 {
2380 struct mgmt_mesh_tx *mesh_tx;
2381 struct mgmt_cp_mesh_send *send = data;
2382 struct mgmt_rp_mesh_read_features rp;
2383 bool sending;
2384 int err = 0;
2385
2386 if (!lmp_le_capable(hdev) ||
2387 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2388 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2389 MGMT_STATUS_NOT_SUPPORTED);
2390 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2391 len <= MGMT_MESH_SEND_SIZE ||
2392 len > (MGMT_MESH_SEND_SIZE + 31))
2393 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2394 MGMT_STATUS_REJECTED);
2395
2396 hci_dev_lock(hdev);
2397
2398 memset(&rp, 0, sizeof(rp));
2399 rp.max_handles = MESH_HANDLES_MAX;
2400
2401 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2402
2403 if (rp.max_handles <= rp.used_handles) {
2404 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2405 MGMT_STATUS_BUSY);
2406 goto done;
2407 }
2408
2409 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2410 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2411
2412 if (!mesh_tx)
2413 err = -ENOMEM;
2414 else if (!sending)
2415 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2416 mesh_send_start_complete);
2417
2418 if (err < 0) {
2419 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2420 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2421 MGMT_STATUS_FAILED);
2422
2423 if (mesh_tx) {
2424 if (sending)
2425 mgmt_mesh_remove(mesh_tx);
2426 }
2427 } else {
2428 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2429
2430 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2431 &mesh_tx->handle, 1);
2432 }
2433
2434 done:
2435 hci_dev_unlock(hdev);
2436 return err;
2437 }
2438
2439 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2440 {
2441 struct mgmt_mode *cp = data;
2442 struct mgmt_pending_cmd *cmd;
2443 int err;
2444 u8 val, enabled;
2445
2446 bt_dev_dbg(hdev, "sock %p", sk);
2447
2448 if (!lmp_le_capable(hdev))
2449 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2450 MGMT_STATUS_NOT_SUPPORTED);
2451
2452 if (cp->val != 0x00 && cp->val != 0x01)
2453 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2454 MGMT_STATUS_INVALID_PARAMS);
2455
2456 /* Bluetooth single-mode LE-only controllers, or dual-mode
2457 * controllers configured as LE-only devices, do not allow
2458 * switching LE off: these either have LE enabled explicitly
2459 * or have had BR/EDR switched off previously.
2460 *
2461 * When trying to enable an already enabled LE, gracefully
2462 * send a positive response. Trying to disable it, however,
2463 * will result in rejection.
2464 */
2465 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2466 if (cp->val == 0x01)
2467 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2468
2469 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2470 MGMT_STATUS_REJECTED);
2471 }
2472
2473 hci_dev_lock(hdev);
2474
2475 val = !!cp->val;
2476 enabled = lmp_host_le_capable(hdev);
2477
2478 if (!hdev_is_powered(hdev) || val == enabled) {
2479 bool changed = false;
2480
2481 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2482 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2483 changed = true;
2484 }
2485
2486 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2487 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2488 changed = true;
2489 }
2490
2491 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2492 if (err < 0)
2493 goto unlock;
2494
2495 if (changed)
2496 err = new_settings(hdev, sk);
2497
2498 goto unlock;
2499 }
2500
2501 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2502 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2503 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2504 MGMT_STATUS_BUSY);
2505 goto unlock;
2506 }
2507
2508 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2509 if (!cmd)
2510 err = -ENOMEM;
2511 else
2512 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2513 set_le_complete);
2514
2515 if (err < 0) {
2516 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2517 MGMT_STATUS_FAILED);
2518
2519 if (cmd)
2520 mgmt_pending_remove(cmd);
2521 }
2522
2523 unlock:
2524 hci_dev_unlock(hdev);
2525 return err;
2526 }
2527
2528 static int send_hci_cmd_sync(struct hci_dev *hdev, void *data)
2529 {
2530 struct mgmt_pending_cmd *cmd = data;
2531 struct mgmt_cp_hci_cmd_sync *cp = cmd->param;
2532 struct sk_buff *skb;
2533
2534 skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cp->opcode),
2535 le16_to_cpu(cp->params_len), cp->params,
2536 cp->event, cp->timeout ?
2537 msecs_to_jiffies(cp->timeout * 1000) :
2538 HCI_CMD_TIMEOUT);
2539 if (IS_ERR(skb)) {
2540 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2541 mgmt_status(PTR_ERR(skb)));
2542 goto done;
2543 }
2544
2545 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC, 0,
2546 skb->data, skb->len);
2547
2548 kfree_skb(skb);
2549
2550 done:
2551 mgmt_pending_free(cmd);
2552
2553 return 0;
2554 }
2555
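/* MGMT_OP_HCI_CMD_SYNC lets a management client issue a raw HCI
 * command and receive the resulting event payload as the command
 * response; the transfer itself runs from the cmd_sync queue via
 * send_hci_cmd_sync() above.
 */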
2556 static int mgmt_hci_cmd_sync(struct sock *sk, struct hci_dev *hdev,
2557 void *data, u16 len)
2558 {
2559 struct mgmt_cp_hci_cmd_sync *cp = data;
2560 struct mgmt_pending_cmd *cmd;
2561 int err;
2562
2563 if (len < sizeof(*cp))
2564 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2565 MGMT_STATUS_INVALID_PARAMS);
2566
2567 hci_dev_lock(hdev);
2568 cmd = mgmt_pending_new(sk, MGMT_OP_HCI_CMD_SYNC, hdev, data, len);
2569 if (!cmd)
2570 err = -ENOMEM;
2571 else
2572 err = hci_cmd_sync_queue(hdev, send_hci_cmd_sync, cmd, NULL);
2573
2574 if (err < 0) {
2575 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2576 MGMT_STATUS_FAILED);
2577
2578 if (cmd)
2579 mgmt_pending_free(cmd);
2580 }
2581
2582 hci_dev_unlock(hdev);
2583 return err;
2584 }
2585
2586 /* This is a helper function to test for pending mgmt commands that can
2587 * cause CoD or EIR HCI commands. We can only allow one such pending
2588 * mgmt command at a time since otherwise we cannot easily track what
2589 * the current values are, will be, and based on that calculate if a new
2590 * HCI command needs to be sent and if yes with what value.
2591 */
2592 static bool pending_eir_or_class(struct hci_dev *hdev)
2593 {
2594 struct mgmt_pending_cmd *cmd;
2595
2596 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2597 switch (cmd->opcode) {
2598 case MGMT_OP_ADD_UUID:
2599 case MGMT_OP_REMOVE_UUID:
2600 case MGMT_OP_SET_DEV_CLASS:
2601 case MGMT_OP_SET_POWERED:
2602 return true;
2603 }
2604 }
2605
2606 return false;
2607 }
2608
2609 static const u8 bluetooth_base_uuid[] = {
2610 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2611 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2612 };
2613
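/* Determine the shortest representation of a UUID: values built on the
 * Bluetooth Base UUID collapse to 16 or 32 bits, anything else has to
 * be kept as a full 128-bit UUID.
 */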
2614 static u8 get_uuid_size(const u8 *uuid)
2615 {
2616 u32 val;
2617
2618 if (memcmp(uuid, bluetooth_base_uuid, 12))
2619 return 128;
2620
2621 val = get_unaligned_le32(&uuid[12]);
2622 if (val > 0xffff)
2623 return 32;
2624
2625 return 16;
2626 }
2627
2628 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2629 {
2630 struct mgmt_pending_cmd *cmd = data;
2631
2632 bt_dev_dbg(hdev, "err %d", err);
2633
2634 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2635 mgmt_status(err), hdev->dev_class, 3);
2636
2637 mgmt_pending_free(cmd);
2638 }
2639
2640 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2641 {
2642 int err;
2643
2644 err = hci_update_class_sync(hdev);
2645 if (err)
2646 return err;
2647
2648 return hci_update_eir_sync(hdev);
2649 }
2650
2651 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2652 {
2653 struct mgmt_cp_add_uuid *cp = data;
2654 struct mgmt_pending_cmd *cmd;
2655 struct bt_uuid *uuid;
2656 int err;
2657
2658 bt_dev_dbg(hdev, "sock %p", sk);
2659
2660 hci_dev_lock(hdev);
2661
2662 if (pending_eir_or_class(hdev)) {
2663 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2664 MGMT_STATUS_BUSY);
2665 goto failed;
2666 }
2667
2668 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2669 if (!uuid) {
2670 err = -ENOMEM;
2671 goto failed;
2672 }
2673
2674 memcpy(uuid->uuid, cp->uuid, 16);
2675 uuid->svc_hint = cp->svc_hint;
2676 uuid->size = get_uuid_size(cp->uuid);
2677
2678 list_add_tail(&uuid->list, &hdev->uuids);
2679
2680 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2681 if (!cmd) {
2682 err = -ENOMEM;
2683 goto failed;
2684 }
2685
2686 /* MGMT_OP_ADD_UUID doesn't require the adapter to be up and
2687 * running, so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2688 */
2689 err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
2690 mgmt_class_complete);
2691 if (err < 0) {
2692 mgmt_pending_free(cmd);
2693 goto failed;
2694 }
2695
2696 failed:
2697 hci_dev_unlock(hdev);
2698 return err;
2699 }
2700
2701 static bool enable_service_cache(struct hci_dev *hdev)
2702 {
2703 if (!hdev_is_powered(hdev))
2704 return false;
2705
2706 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2707 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2708 CACHE_TIMEOUT);
2709 return true;
2710 }
2711
2712 return false;
2713 }
2714
2715 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2716 {
2717 int err;
2718
2719 err = hci_update_class_sync(hdev);
2720 if (err)
2721 return err;
2722
2723 return hci_update_eir_sync(hdev);
2724 }
2725
2726 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2727 u16 len)
2728 {
2729 struct mgmt_cp_remove_uuid *cp = data;
2730 struct mgmt_pending_cmd *cmd;
2731 struct bt_uuid *match, *tmp;
2732 static const u8 bt_uuid_any[] = {
2733 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2734 };
2735 int err, found;
2736
2737 bt_dev_dbg(hdev, "sock %p", sk);
2738
2739 hci_dev_lock(hdev);
2740
2741 if (pending_eir_or_class(hdev)) {
2742 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2743 MGMT_STATUS_BUSY);
2744 goto unlock;
2745 }
2746
2747 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2748 hci_uuids_clear(hdev);
2749
2750 if (enable_service_cache(hdev)) {
2751 err = mgmt_cmd_complete(sk, hdev->id,
2752 MGMT_OP_REMOVE_UUID,
2753 0, hdev->dev_class, 3);
2754 goto unlock;
2755 }
2756
2757 goto update_class;
2758 }
2759
2760 found = 0;
2761
2762 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2763 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2764 continue;
2765
2766 list_del(&match->list);
2767 kfree(match);
2768 found++;
2769 }
2770
2771 if (found == 0) {
2772 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2773 MGMT_STATUS_INVALID_PARAMS);
2774 goto unlock;
2775 }
2776
2777 update_class:
2778 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2779 if (!cmd) {
2780 err = -ENOMEM;
2781 goto unlock;
2782 }
2783
2784 /* MGMT_OP_REMOVE_UUID doesn't require the adapter to be up and
2785 * running, so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2786 */
2787 err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
2788 mgmt_class_complete);
2789 if (err < 0)
2790 mgmt_pending_free(cmd);
2791
2792 unlock:
2793 hci_dev_unlock(hdev);
2794 return err;
2795 }
2796
2797 static int set_class_sync(struct hci_dev *hdev, void *data)
2798 {
2799 int err = 0;
2800
2801 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2802 cancel_delayed_work_sync(&hdev->service_cache);
2803 err = hci_update_eir_sync(hdev);
2804 }
2805
2806 if (err)
2807 return err;
2808
2809 return hci_update_class_sync(hdev);
2810 }
2811
2812 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2813 u16 len)
2814 {
2815 struct mgmt_cp_set_dev_class *cp = data;
2816 struct mgmt_pending_cmd *cmd;
2817 int err;
2818
2819 bt_dev_dbg(hdev, "sock %p", sk);
2820
2821 if (!lmp_bredr_capable(hdev))
2822 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2823 MGMT_STATUS_NOT_SUPPORTED);
2824
2825 hci_dev_lock(hdev);
2826
2827 if (pending_eir_or_class(hdev)) {
2828 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2829 MGMT_STATUS_BUSY);
2830 goto unlock;
2831 }
2832
2833 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2834 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2835 MGMT_STATUS_INVALID_PARAMS);
2836 goto unlock;
2837 }
2838
2839 hdev->major_class = cp->major;
2840 hdev->minor_class = cp->minor;
2841
2842 if (!hdev_is_powered(hdev)) {
2843 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2844 hdev->dev_class, 3);
2845 goto unlock;
2846 }
2847
2848 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2849 if (!cmd) {
2850 err = -ENOMEM;
2851 goto unlock;
2852 }
2853
2854 /* MGMT_OP_SET_DEV_CLASS doesn't require the adapter to be up and
2855 * running, so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2856 */
2857 err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
2858 mgmt_class_complete);
2859 if (err < 0)
2860 mgmt_pending_free(cmd);
2861
2862 unlock:
2863 hci_dev_unlock(hdev);
2864 return err;
2865 }
2866
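/* MGMT_OP_LOAD_LINK_KEYS replaces the entire link key store: existing
 * keys are cleared first and each new entry is validated (address
 * type, key type, blocked-key list) before being added.
 */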
2867 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2868 u16 len)
2869 {
2870 struct mgmt_cp_load_link_keys *cp = data;
2871 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2872 sizeof(struct mgmt_link_key_info));
2873 u16 key_count, expected_len;
2874 bool changed;
2875 int i;
2876
2877 bt_dev_dbg(hdev, "sock %p", sk);
2878
2879 if (!lmp_bredr_capable(hdev))
2880 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2881 MGMT_STATUS_NOT_SUPPORTED);
2882
2883 key_count = __le16_to_cpu(cp->key_count);
2884 if (key_count > max_key_count) {
2885 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2886 key_count);
2887 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2888 MGMT_STATUS_INVALID_PARAMS);
2889 }
2890
2891 expected_len = struct_size(cp, keys, key_count);
2892 if (expected_len != len) {
2893 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2894 expected_len, len);
2895 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2896 MGMT_STATUS_INVALID_PARAMS);
2897 }
2898
2899 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2900 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2901 MGMT_STATUS_INVALID_PARAMS);
2902
2903 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2904 key_count);
2905
2906 hci_dev_lock(hdev);
2907
2908 hci_link_keys_clear(hdev);
2909
2910 if (cp->debug_keys)
2911 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2912 else
2913 changed = hci_dev_test_and_clear_flag(hdev,
2914 HCI_KEEP_DEBUG_KEYS);
2915
2916 if (changed)
2917 new_settings(hdev, NULL);
2918
2919 for (i = 0; i < key_count; i++) {
2920 struct mgmt_link_key_info *key = &cp->keys[i];
2921
2922 if (hci_is_blocked_key(hdev,
2923 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2924 key->val)) {
2925 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2926 &key->addr.bdaddr);
2927 continue;
2928 }
2929
2930 if (key->addr.type != BDADDR_BREDR) {
2931 bt_dev_warn(hdev,
2932 "Invalid link address type %u for %pMR",
2933 key->addr.type, &key->addr.bdaddr);
2934 continue;
2935 }
2936
2937 if (key->type > 0x08) {
2938 bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
2939 key->type, &key->addr.bdaddr);
2940 continue;
2941 }
2942
2943 /* Always ignore debug keys and require a new pairing if
2944 * the user wants to use them.
2945 */
2946 if (key->type == HCI_LK_DEBUG_COMBINATION)
2947 continue;
2948
2949 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2950 key->type, key->pin_len, NULL);
2951 }
2952
2953 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2954
2955 hci_dev_unlock(hdev);
2956
2957 return 0;
2958 }
2959
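/* Send a Device Unpaired event to every management socket except the
 * one that triggered the removal.
 */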
2960 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2961 u8 addr_type, struct sock *skip_sk)
2962 {
2963 struct mgmt_ev_device_unpaired ev;
2964
2965 bacpy(&ev.addr.bdaddr, bdaddr);
2966 ev.addr.type = addr_type;
2967
2968 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2969 skip_sk);
2970 }
2971
2972 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2973 {
2974 struct mgmt_pending_cmd *cmd = data;
2975 struct mgmt_cp_unpair_device *cp = cmd->param;
2976
2977 if (!err)
2978 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2979
2980 cmd->cmd_complete(cmd, err);
2981 mgmt_pending_free(cmd);
2982 }
2983
2984 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2985 {
2986 struct mgmt_pending_cmd *cmd = data;
2987 struct mgmt_cp_unpair_device *cp = cmd->param;
2988 struct hci_conn *conn;
2989
2990 if (cp->addr.type == BDADDR_BREDR)
2991 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2992 &cp->addr.bdaddr);
2993 else
2994 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2995 le_addr_type(cp->addr.type));
2996
2997 if (!conn)
2998 return 0;
2999
3000 /* Disregard any possible error since the likes of hci_abort_conn_sync
3001 * will clean up the connection no matter the error.
3002 */
3003 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3004
3005 return 0;
3006 }
3007
3008 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3009 u16 len)
3010 {
3011 struct mgmt_cp_unpair_device *cp = data;
3012 struct mgmt_rp_unpair_device rp;
3013 struct hci_conn_params *params;
3014 struct mgmt_pending_cmd *cmd;
3015 struct hci_conn *conn;
3016 u8 addr_type;
3017 int err;
3018
3019 memset(&rp, 0, sizeof(rp));
3020 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3021 rp.addr.type = cp->addr.type;
3022
3023 if (!bdaddr_type_is_valid(cp->addr.type))
3024 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3025 MGMT_STATUS_INVALID_PARAMS,
3026 &rp, sizeof(rp));
3027
3028 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
3029 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3030 MGMT_STATUS_INVALID_PARAMS,
3031 &rp, sizeof(rp));
3032
3033 hci_dev_lock(hdev);
3034
3035 if (!hdev_is_powered(hdev)) {
3036 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3037 MGMT_STATUS_NOT_POWERED, &rp,
3038 sizeof(rp));
3039 goto unlock;
3040 }
3041
3042 if (cp->addr.type == BDADDR_BREDR) {
3043 /* If disconnection is requested, then look up the
3044 * connection. If the remote device is connected, it
3045 * will be later used to terminate the link.
3046 *
3047 * Setting it to NULL explicitly will cause no
3048 * termination of the link.
3049 */
3050 if (cp->disconnect)
3051 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3052 &cp->addr.bdaddr);
3053 else
3054 conn = NULL;
3055
3056 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3057 if (err < 0) {
3058 err = mgmt_cmd_complete(sk, hdev->id,
3059 MGMT_OP_UNPAIR_DEVICE,
3060 MGMT_STATUS_NOT_PAIRED, &rp,
3061 sizeof(rp));
3062 goto unlock;
3063 }
3064
3065 goto done;
3066 }
3067
3068 /* LE address type */
3069 addr_type = le_addr_type(cp->addr.type);
3070
3071 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3072 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3073 if (err < 0) {
3074 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3075 MGMT_STATUS_NOT_PAIRED, &rp,
3076 sizeof(rp));
3077 goto unlock;
3078 }
3079
3080 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3081 if (!conn) {
3082 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3083 goto done;
3084 }
3085
3086
3087 /* Defer clearing the connection parameters until the connection
3088 * closes, so that they can be kept if the devices re-pair.
3089 */
3090 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3091
3092 /* Disable auto-connection parameters if present */
3093 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3094 if (params) {
3095 if (params->explicit_connect)
3096 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3097 else
3098 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3099 }
3100
3101 /* If disconnection is not requested, then clear the connection
3102 * variable so that the link is not terminated.
3103 */
3104 if (!cp->disconnect)
3105 conn = NULL;
3106
3107 done:
3108 /* If the connection variable is set, then termination of the
3109 * link is requested.
3110 */
3111 if (!conn) {
3112 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3113 &rp, sizeof(rp));
3114 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3115 goto unlock;
3116 }
3117
3118 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3119 sizeof(*cp));
3120 if (!cmd) {
3121 err = -ENOMEM;
3122 goto unlock;
3123 }
3124
3125 cmd->cmd_complete = addr_cmd_complete;
3126
3127 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3128 unpair_device_complete);
3129 if (err < 0)
3130 mgmt_pending_free(cmd);
3131
3132 unlock:
3133 hci_dev_unlock(hdev);
3134 return err;
3135 }
3136
3137 static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
3138 {
3139 struct mgmt_pending_cmd *cmd = data;
3140
3141 cmd->cmd_complete(cmd, mgmt_status(err));
3142 mgmt_pending_free(cmd);
3143 }
3144
3145 static int disconnect_sync(struct hci_dev *hdev, void *data)
3146 {
3147 struct mgmt_pending_cmd *cmd = data;
3148 struct mgmt_cp_disconnect *cp = cmd->param;
3149 struct hci_conn *conn;
3150
3151 if (cp->addr.type == BDADDR_BREDR)
3152 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3153 &cp->addr.bdaddr);
3154 else
3155 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3156 le_addr_type(cp->addr.type));
3157
3158 if (!conn)
3159 return -ENOTCONN;
3160
3161 /* Disregard any possible error since the likes of hci_abort_conn_sync
3162 * will clean up the connection no matter the error.
3163 */
3164 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3165
3166 return 0;
3167 }
3168
3169 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3170 u16 len)
3171 {
3172 struct mgmt_cp_disconnect *cp = data;
3173 struct mgmt_rp_disconnect rp;
3174 struct mgmt_pending_cmd *cmd;
3175 int err;
3176
3177 bt_dev_dbg(hdev, "sock %p", sk);
3178
3179 memset(&rp, 0, sizeof(rp));
3180 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3181 rp.addr.type = cp->addr.type;
3182
3183 if (!bdaddr_type_is_valid(cp->addr.type))
3184 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3185 MGMT_STATUS_INVALID_PARAMS,
3186 &rp, sizeof(rp));
3187
3188 hci_dev_lock(hdev);
3189
3190 if (!test_bit(HCI_UP, &hdev->flags)) {
3191 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3192 MGMT_STATUS_NOT_POWERED, &rp,
3193 sizeof(rp));
3194 goto failed;
3195 }
3196
3197 cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3198 if (!cmd) {
3199 err = -ENOMEM;
3200 goto failed;
3201 }
3202
3203 cmd->cmd_complete = generic_cmd_complete;
3204
3205 err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
3206 disconnect_complete);
3207 if (err < 0)
3208 mgmt_pending_free(cmd);
3209
3210 failed:
3211 hci_dev_unlock(hdev);
3212 return err;
3213 }
3214
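/* Translate an internal link type/address type pair into the BDADDR_*
 * address type exposed by the management interface.
 */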
3215 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3216 {
3217 switch (link_type) {
3218 case ISO_LINK:
3219 case LE_LINK:
3220 switch (addr_type) {
3221 case ADDR_LE_DEV_PUBLIC:
3222 return BDADDR_LE_PUBLIC;
3223
3224 default:
3225 /* Fallback to LE Random address type */
3226 return BDADDR_LE_RANDOM;
3227 }
3228
3229 default:
3230 /* Fallback to BR/EDR type */
3231 return BDADDR_BREDR;
3232 }
3233 }
3234
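/* MGMT_OP_GET_CONNECTIONS: report all connections that have been
 * announced to the management interface, skipping SCO and eSCO links
 * since only the underlying ACL connection matters to userspace.
 */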
3235 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3236 u16 data_len)
3237 {
3238 struct mgmt_rp_get_connections *rp;
3239 struct hci_conn *c;
3240 int err;
3241 u16 i;
3242
3243 bt_dev_dbg(hdev, "sock %p", sk);
3244
3245 hci_dev_lock(hdev);
3246
3247 if (!hdev_is_powered(hdev)) {
3248 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3249 MGMT_STATUS_NOT_POWERED);
3250 goto unlock;
3251 }
3252
3253 i = 0;
3254 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3255 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3256 i++;
3257 }
3258
3259 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3260 if (!rp) {
3261 err = -ENOMEM;
3262 goto unlock;
3263 }
3264
3265 i = 0;
3266 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3267 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3268 continue;
3269 bacpy(&rp->addr[i].bdaddr, &c->dst);
3270 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3271 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3272 continue;
3273 i++;
3274 }
3275
3276 rp->conn_count = cpu_to_le16(i);
3277
3278 /* Recalculate length in case of filtered SCO connections, etc */
3279 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3280 struct_size(rp, addr, i));
3281
3282 kfree(rp);
3283
3284 unlock:
3285 hci_dev_unlock(hdev);
3286 return err;
3287 }
3288
3289 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3290 struct mgmt_cp_pin_code_neg_reply *cp)
3291 {
3292 struct mgmt_pending_cmd *cmd;
3293 int err;
3294
3295 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3296 sizeof(*cp));
3297 if (!cmd)
3298 return -ENOMEM;
3299
3300 cmd->cmd_complete = addr_cmd_complete;
3301
3302 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3303 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3304 if (err < 0)
3305 mgmt_pending_remove(cmd);
3306
3307 return err;
3308 }
3309
3310 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3311 u16 len)
3312 {
3313 struct hci_conn *conn;
3314 struct mgmt_cp_pin_code_reply *cp = data;
3315 struct hci_cp_pin_code_reply reply;
3316 struct mgmt_pending_cmd *cmd;
3317 int err;
3318
3319 bt_dev_dbg(hdev, "sock %p", sk);
3320
3321 hci_dev_lock(hdev);
3322
3323 if (!hdev_is_powered(hdev)) {
3324 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3325 MGMT_STATUS_NOT_POWERED);
3326 goto failed;
3327 }
3328
3329 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3330 if (!conn) {
3331 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3332 MGMT_STATUS_NOT_CONNECTED);
3333 goto failed;
3334 }
3335
3336 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3337 struct mgmt_cp_pin_code_neg_reply ncp;
3338
3339 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3340
3341 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3342
3343 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3344 if (err >= 0)
3345 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3346 MGMT_STATUS_INVALID_PARAMS);
3347
3348 goto failed;
3349 }
3350
3351 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3352 if (!cmd) {
3353 err = -ENOMEM;
3354 goto failed;
3355 }
3356
3357 cmd->cmd_complete = addr_cmd_complete;
3358
3359 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3360 reply.pin_len = cp->pin_len;
3361 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3362
3363 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3364 if (err < 0)
3365 mgmt_pending_remove(cmd);
3366
3367 failed:
3368 hci_dev_unlock(hdev);
3369 return err;
3370 }
3371
3372 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3373 u16 len)
3374 {
3375 struct mgmt_cp_set_io_capability *cp = data;
3376
3377 bt_dev_dbg(hdev, "sock %p", sk);
3378
3379 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3380 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3381 MGMT_STATUS_INVALID_PARAMS);
3382
3383 hci_dev_lock(hdev);
3384
3385 hdev->io_capability = cp->io_capability;
3386
3387 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3388
3389 hci_dev_unlock(hdev);
3390
3391 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3392 NULL, 0);
3393 }
3394
3395 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3396 {
3397 struct hci_dev *hdev = conn->hdev;
3398 struct mgmt_pending_cmd *cmd;
3399
3400 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3401 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3402 continue;
3403
3404 if (cmd->user_data != conn)
3405 continue;
3406
3407 return cmd;
3408 }
3409
3410 return NULL;
3411 }
3412
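/* Finalize a Pair Device request: send the response, detach the
 * pairing callbacks from the connection and drop the references taken
 * while the command was pending.
 */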
3413 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3414 {
3415 struct mgmt_rp_pair_device rp;
3416 struct hci_conn *conn = cmd->user_data;
3417 int err;
3418
3419 bacpy(&rp.addr.bdaddr, &conn->dst);
3420 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3421
3422 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3423 status, &rp, sizeof(rp));
3424
3425 /* So we don't get further callbacks for this connection */
3426 conn->connect_cfm_cb = NULL;
3427 conn->security_cfm_cb = NULL;
3428 conn->disconn_cfm_cb = NULL;
3429
3430 hci_conn_drop(conn);
3431
3432 /* The device is paired so there is no need to remove
3433 * its connection parameters anymore.
3434 */
3435 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3436
3437 hci_conn_put(conn);
3438
3439 return err;
3440 }
3441
3442 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3443 {
3444 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3445 struct mgmt_pending_cmd *cmd;
3446
3447 cmd = find_pairing(conn);
3448 if (cmd) {
3449 cmd->cmd_complete(cmd, status);
3450 mgmt_pending_remove(cmd);
3451 }
3452 }
3453
3454 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3455 {
3456 struct mgmt_pending_cmd *cmd;
3457
3458 BT_DBG("status %u", status);
3459
3460 cmd = find_pairing(conn);
3461 if (!cmd) {
3462 BT_DBG("Unable to find a pending command");
3463 return;
3464 }
3465
3466 cmd->cmd_complete(cmd, mgmt_status(status));
3467 mgmt_pending_remove(cmd);
3468 }
3469
3470 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3471 {
3472 struct mgmt_pending_cmd *cmd;
3473
3474 BT_DBG("status %u", status);
3475
3476 if (!status)
3477 return;
3478
3479 cmd = find_pairing(conn);
3480 if (!cmd) {
3481 BT_DBG("Unable to find a pending command");
3482 return;
3483 }
3484
3485 cmd->cmd_complete(cmd, mgmt_status(status));
3486 mgmt_pending_remove(cmd);
3487 }
3488
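/* Handler for MGMT_OP_PAIR_DEVICE: initiate an ACL or LE connection to
 * the remote device, attach the pairing callbacks and leave the
 * command pending until the pairing outcome is known.
 */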
3489 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3490 u16 len)
3491 {
3492 struct mgmt_cp_pair_device *cp = data;
3493 struct mgmt_rp_pair_device rp;
3494 struct mgmt_pending_cmd *cmd;
3495 u8 sec_level, auth_type;
3496 struct hci_conn *conn;
3497 int err;
3498
3499 bt_dev_dbg(hdev, "sock %p", sk);
3500
3501 memset(&rp, 0, sizeof(rp));
3502 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3503 rp.addr.type = cp->addr.type;
3504
3505 if (!bdaddr_type_is_valid(cp->addr.type))
3506 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3507 MGMT_STATUS_INVALID_PARAMS,
3508 &rp, sizeof(rp));
3509
3510 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3511 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3512 MGMT_STATUS_INVALID_PARAMS,
3513 &rp, sizeof(rp));
3514
3515 hci_dev_lock(hdev);
3516
3517 if (!hdev_is_powered(hdev)) {
3518 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3519 MGMT_STATUS_NOT_POWERED, &rp,
3520 sizeof(rp));
3521 goto unlock;
3522 }
3523
3524 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3525 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3526 MGMT_STATUS_ALREADY_PAIRED, &rp,
3527 sizeof(rp));
3528 goto unlock;
3529 }
3530
3531 sec_level = BT_SECURITY_MEDIUM;
3532 auth_type = HCI_AT_DEDICATED_BONDING;
3533
3534 if (cp->addr.type == BDADDR_BREDR) {
3535 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3536 auth_type, CONN_REASON_PAIR_DEVICE,
3537 HCI_ACL_CONN_TIMEOUT);
3538 } else {
3539 u8 addr_type = le_addr_type(cp->addr.type);
3540 struct hci_conn_params *p;
3541
3542 /* When pairing a new device, it is expected to remember
3543 * this device for future connections. Adding the connection
3544 * parameter information ahead of time allows tracking
3545 * of the peripheral preferred values and will speed up any
3546 * further connection establishment.
3547 *
3548 * If connection parameters already exist, then they
3549 * will be kept and this function does nothing.
3550 */
3551 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3552 if (!p) {
3553 err = -EIO;
3554 goto unlock;
3555 }
3556
3557 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3558 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3559
3560 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3561 sec_level, HCI_LE_CONN_TIMEOUT,
3562 CONN_REASON_PAIR_DEVICE);
3563 }
3564
3565 if (IS_ERR(conn)) {
3566 int status;
3567
3568 if (PTR_ERR(conn) == -EBUSY)
3569 status = MGMT_STATUS_BUSY;
3570 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3571 status = MGMT_STATUS_NOT_SUPPORTED;
3572 else if (PTR_ERR(conn) == -ECONNREFUSED)
3573 status = MGMT_STATUS_REJECTED;
3574 else
3575 status = MGMT_STATUS_CONNECT_FAILED;
3576
3577 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3578 status, &rp, sizeof(rp));
3579 goto unlock;
3580 }
3581
3582 if (conn->connect_cfm_cb) {
3583 hci_conn_drop(conn);
3584 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3585 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3586 goto unlock;
3587 }
3588
3589 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3590 if (!cmd) {
3591 err = -ENOMEM;
3592 hci_conn_drop(conn);
3593 goto unlock;
3594 }
3595
3596 cmd->cmd_complete = pairing_complete;
3597
3598 /* For LE, just connecting isn't a proof that the pairing finished */
3599 if (cp->addr.type == BDADDR_BREDR) {
3600 conn->connect_cfm_cb = pairing_complete_cb;
3601 conn->security_cfm_cb = pairing_complete_cb;
3602 conn->disconn_cfm_cb = pairing_complete_cb;
3603 } else {
3604 conn->connect_cfm_cb = le_pairing_complete_cb;
3605 conn->security_cfm_cb = le_pairing_complete_cb;
3606 conn->disconn_cfm_cb = le_pairing_complete_cb;
3607 }
3608
3609 conn->io_capability = cp->io_cap;
3610 cmd->user_data = hci_conn_get(conn);
3611
3612 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3613 hci_conn_security(conn, sec_level, auth_type, true)) {
3614 cmd->cmd_complete(cmd, 0);
3615 mgmt_pending_remove(cmd);
3616 }
3617
3618 err = 0;
3619
3620 unlock:
3621 hci_dev_unlock(hdev);
3622 return err;
3623 }
3624
3625 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3626 u16 len)
3627 {
3628 struct mgmt_addr_info *addr = data;
3629 struct mgmt_pending_cmd *cmd;
3630 struct hci_conn *conn;
3631 int err;
3632
3633 bt_dev_dbg(hdev, "sock %p", sk);
3634
3635 hci_dev_lock(hdev);
3636
3637 if (!hdev_is_powered(hdev)) {
3638 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3639 MGMT_STATUS_NOT_POWERED);
3640 goto unlock;
3641 }
3642
3643 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3644 if (!cmd) {
3645 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3646 MGMT_STATUS_INVALID_PARAMS);
3647 goto unlock;
3648 }
3649
3650 conn = cmd->user_data;
3651
3652 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3653 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3654 MGMT_STATUS_INVALID_PARAMS);
3655 goto unlock;
3656 }
3657
3658 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3659 mgmt_pending_remove(cmd);
3660
3661 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3662 addr, sizeof(*addr));
3663
3664 /* Since the user doesn't want to proceed with the connection, abort any
3665 * ongoing pairing and then terminate the link if it was created
3666 * because of the pair device action.
3667 */
3668 if (addr->type == BDADDR_BREDR)
3669 hci_remove_link_key(hdev, &addr->bdaddr);
3670 else
3671 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3672 le_addr_type(addr->type));
3673
3674 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3675 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3676
3677 unlock:
3678 hci_dev_unlock(hdev);
3679 return err;
3680 }
3681
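/* Common helper for the user confirmation and passkey (negative)
 * replies: LE responses are handed to SMP directly, while BR/EDR
 * responses are translated into the corresponding HCI command.
 */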
3682 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3683 struct mgmt_addr_info *addr, u16 mgmt_op,
3684 u16 hci_op, __le32 passkey)
3685 {
3686 struct mgmt_pending_cmd *cmd;
3687 struct hci_conn *conn;
3688 int err;
3689
3690 hci_dev_lock(hdev);
3691
3692 if (!hdev_is_powered(hdev)) {
3693 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3694 MGMT_STATUS_NOT_POWERED, addr,
3695 sizeof(*addr));
3696 goto done;
3697 }
3698
3699 if (addr->type == BDADDR_BREDR)
3700 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3701 else
3702 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3703 le_addr_type(addr->type));
3704
3705 if (!conn) {
3706 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3707 MGMT_STATUS_NOT_CONNECTED, addr,
3708 sizeof(*addr));
3709 goto done;
3710 }
3711
3712 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3713 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3714 if (!err)
3715 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3716 MGMT_STATUS_SUCCESS, addr,
3717 sizeof(*addr));
3718 else
3719 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3720 MGMT_STATUS_FAILED, addr,
3721 sizeof(*addr));
3722
3723 goto done;
3724 }
3725
3726 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3727 if (!cmd) {
3728 err = -ENOMEM;
3729 goto done;
3730 }
3731
3732 cmd->cmd_complete = addr_cmd_complete;
3733
3734 /* Continue with pairing via HCI */
3735 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3736 struct hci_cp_user_passkey_reply cp;
3737
3738 bacpy(&cp.bdaddr, &addr->bdaddr);
3739 cp.passkey = passkey;
3740 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3741 } else
3742 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3743 &addr->bdaddr);
3744
3745 if (err < 0)
3746 mgmt_pending_remove(cmd);
3747
3748 done:
3749 hci_dev_unlock(hdev);
3750 return err;
3751 }
3752
3753 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3754 void *data, u16 len)
3755 {
3756 struct mgmt_cp_pin_code_neg_reply *cp = data;
3757
3758 bt_dev_dbg(hdev, "sock %p", sk);
3759
3760 return user_pairing_resp(sk, hdev, &cp->addr,
3761 MGMT_OP_PIN_CODE_NEG_REPLY,
3762 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3763 }
3764
3765 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3766 u16 len)
3767 {
3768 struct mgmt_cp_user_confirm_reply *cp = data;
3769
3770 bt_dev_dbg(hdev, "sock %p", sk);
3771
3772 if (len != sizeof(*cp))
3773 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3774 MGMT_STATUS_INVALID_PARAMS);
3775
3776 return user_pairing_resp(sk, hdev, &cp->addr,
3777 MGMT_OP_USER_CONFIRM_REPLY,
3778 HCI_OP_USER_CONFIRM_REPLY, 0);
3779 }
3780
3781 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3782 void *data, u16 len)
3783 {
3784 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3785
3786 bt_dev_dbg(hdev, "sock %p", sk);
3787
3788 return user_pairing_resp(sk, hdev, &cp->addr,
3789 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3790 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3791 }
3792
3793 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3794 u16 len)
3795 {
3796 struct mgmt_cp_user_passkey_reply *cp = data;
3797
3798 bt_dev_dbg(hdev, "sock %p", sk);
3799
3800 return user_pairing_resp(sk, hdev, &cp->addr,
3801 MGMT_OP_USER_PASSKEY_REPLY,
3802 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3803 }
3804
3805 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3806 void *data, u16 len)
3807 {
3808 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3809
3810 bt_dev_dbg(hdev, "sock %p", sk);
3811
3812 return user_pairing_resp(sk, hdev, &cp->addr,
3813 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3814 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3815 }
3816
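/* Expire the current advertising instance if its data depends on any of
* the given flags (e.g. the local name or appearance changed) and
* schedule the next instance in its place.
*/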
3817 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3818 {
3819 struct adv_info *adv_instance;
3820
3821 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3822 if (!adv_instance)
3823 return 0;
3824
3825 /* Stop if the current instance doesn't need to be changed */
3826 if (!(adv_instance->flags & flags))
3827 return 0;
3828
3829 cancel_adv_timeout(hdev);
3830
3831 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3832 if (!adv_instance)
3833 return 0;
3834
3835 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3836
3837 return 0;
3838 }
3839
3840 static int name_changed_sync(struct hci_dev *hdev, void *data)
3841 {
3842 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3843 }
3844
3845 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3846 {
3847 struct mgmt_pending_cmd *cmd = data;
3848 struct mgmt_cp_set_local_name *cp = cmd->param;
3849 u8 status = mgmt_status(err);
3850
3851 bt_dev_dbg(hdev, "err %d", err);
3852
3853 if (err == -ECANCELED ||
3854 cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3855 return;
3856
3857 if (status) {
3858 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3859 status);
3860 } else {
3861 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3862 cp, sizeof(*cp));
3863
3864 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3865 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3866 }
3867
3868 mgmt_pending_remove(cmd);
3869 }
3870
3871 static int set_name_sync(struct hci_dev *hdev, void *data)
3872 {
3873 if (lmp_bredr_capable(hdev)) {
3874 hci_update_name_sync(hdev);
3875 hci_update_eir_sync(hdev);
3876 }
3877
3878 /* The name is stored in the scan response data, so there is no
3879 * need to update the advertising data here.
3880 */
3881 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3882 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3883
3884 return 0;
3885 }
3886
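/* Set Local Name: updates the BR/EDR name/EIR and, when advertising,
* the LE scan response data via set_name_sync. A powered-down
* controller is only updated in memory and the change is signalled
* directly to the other mgmt sockets.
*/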
3887 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3888 u16 len)
3889 {
3890 struct mgmt_cp_set_local_name *cp = data;
3891 struct mgmt_pending_cmd *cmd;
3892 int err;
3893
3894 bt_dev_dbg(hdev, "sock %p", sk);
3895
3896 hci_dev_lock(hdev);
3897
3898 /* If the old values are the same as the new ones just return a
3899 * direct command complete event.
3900 */
3901 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3902 !memcmp(hdev->short_name, cp->short_name,
3903 sizeof(hdev->short_name))) {
3904 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3905 data, len);
3906 goto failed;
3907 }
3908
3909 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3910
3911 if (!hdev_is_powered(hdev)) {
3912 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3913
3914 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3915 data, len);
3916 if (err < 0)
3917 goto failed;
3918
3919 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3920 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3921 ext_info_changed(hdev, sk);
3922
3923 goto failed;
3924 }
3925
3926 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3927 if (!cmd)
3928 err = -ENOMEM;
3929 else
3930 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3931 set_name_complete);
3932
3933 if (err < 0) {
3934 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3935 MGMT_STATUS_FAILED);
3936
3937 if (cmd)
3938 mgmt_pending_remove(cmd);
3939
3940 goto failed;
3941 }
3942
3943 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3944
3945 failed:
3946 hci_dev_unlock(hdev);
3947 return err;
3948 }
3949
3950 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3951 {
3952 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3953 }
3954
3955 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3956 u16 len)
3957 {
3958 struct mgmt_cp_set_appearance *cp = data;
3959 u16 appearance;
3960 int err;
3961
3962 bt_dev_dbg(hdev, "sock %p", sk);
3963
3964 if (!lmp_le_capable(hdev))
3965 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3966 MGMT_STATUS_NOT_SUPPORTED);
3967
3968 appearance = le16_to_cpu(cp->appearance);
3969
3970 hci_dev_lock(hdev);
3971
3972 if (hdev->appearance != appearance) {
3973 hdev->appearance = appearance;
3974
3975 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3976 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3977 NULL);
3978
3979 ext_info_changed(hdev, sk);
3980 }
3981
3982 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3983 0);
3984
3985 hci_dev_unlock(hdev);
3986
3987 return err;
3988 }
3989
3990 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3991 void *data, u16 len)
3992 {
3993 struct mgmt_rp_get_phy_configuration rp;
3994
3995 bt_dev_dbg(hdev, "sock %p", sk);
3996
3997 hci_dev_lock(hdev);
3998
3999 memset(&rp, 0, sizeof(rp));
4000
4001 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
4002 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
4003 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
4004
4005 hci_dev_unlock(hdev);
4006
4007 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
4008 &rp, sizeof(rp));
4009 }
4010
4011 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
4012 {
4013 struct mgmt_ev_phy_configuration_changed ev;
4014
4015 memset(&ev, 0, sizeof(ev));
4016
4017 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
4018
4019 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
4020 sizeof(ev), skip);
4021 }
4022
4023 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
4024 {
4025 struct mgmt_pending_cmd *cmd = data;
4026 struct sk_buff *skb = cmd->skb;
4027 u8 status = mgmt_status(err);
4028
4029 if (err == -ECANCELED ||
4030 cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
4031 return;
4032
4033 if (!status) {
4034 if (!skb)
4035 status = MGMT_STATUS_FAILED;
4036 else if (IS_ERR(skb))
4037 status = mgmt_status(PTR_ERR(skb));
4038 else
4039 status = mgmt_status(skb->data[0]);
4040 }
4041
4042 bt_dev_dbg(hdev, "status %d", status);
4043
4044 if (status) {
4045 mgmt_cmd_status(cmd->sk, hdev->id,
4046 MGMT_OP_SET_PHY_CONFIGURATION, status);
4047 } else {
4048 mgmt_cmd_complete(cmd->sk, hdev->id,
4049 MGMT_OP_SET_PHY_CONFIGURATION, 0,
4050 NULL, 0);
4051
4052 mgmt_phy_configuration_changed(hdev, cmd->sk);
4053 }
4054
4055 if (skb && !IS_ERR(skb))
4056 kfree_skb(skb);
4057
4058 mgmt_pending_remove(cmd);
4059 }
4060
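/* Translate the MGMT LE PHY selection into an HCI LE Set Default PHY
* command, e.g. MGMT_PHY_LE_2M_TX selects HCI_LE_SET_PHY_2M in tx_phys.
* The all_phys bits tell the controller the host has no TX (bit 0) or
* RX (bit 1) preference when no explicit PHYs are selected.
*/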
4061 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4062 {
4063 struct mgmt_pending_cmd *cmd = data;
4064 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4065 struct hci_cp_le_set_default_phy cp_phy;
4066 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4067
4068 memset(&cp_phy, 0, sizeof(cp_phy));
4069
4070 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4071 cp_phy.all_phys |= 0x01;
4072
4073 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4074 cp_phy.all_phys |= 0x02;
4075
4076 if (selected_phys & MGMT_PHY_LE_1M_TX)
4077 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4078
4079 if (selected_phys & MGMT_PHY_LE_2M_TX)
4080 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4081
4082 if (selected_phys & MGMT_PHY_LE_CODED_TX)
4083 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4084
4085 if (selected_phys & MGMT_PHY_LE_1M_RX)
4086 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4087
4088 if (selected_phys & MGMT_PHY_LE_2M_RX)
4089 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4090
4091 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4092 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4093
4094 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4095 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4096
4097 return 0;
4098 }
4099
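/* Set PHY Configuration: BR/EDR selections are applied locally by
* recomputing hdev->pkt_type (note that the EDR bits are inverted
* "shall not be used" bits), while LE PHY changes require queuing an
* HCI command via set_default_phy_sync.
*/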
4100 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4101 void *data, u16 len)
4102 {
4103 struct mgmt_cp_set_phy_configuration *cp = data;
4104 struct mgmt_pending_cmd *cmd;
4105 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4106 u16 pkt_type = (HCI_DH1 | HCI_DM1);
4107 bool changed = false;
4108 int err;
4109
4110 bt_dev_dbg(hdev, "sock %p", sk);
4111
4112 configurable_phys = get_configurable_phys(hdev);
4113 supported_phys = get_supported_phys(hdev);
4114 selected_phys = __le32_to_cpu(cp->selected_phys);
4115
4116 if (selected_phys & ~supported_phys)
4117 return mgmt_cmd_status(sk, hdev->id,
4118 MGMT_OP_SET_PHY_CONFIGURATION,
4119 MGMT_STATUS_INVALID_PARAMS);
4120
4121 unconfigure_phys = supported_phys & ~configurable_phys;
4122
4123 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4124 return mgmt_cmd_status(sk, hdev->id,
4125 MGMT_OP_SET_PHY_CONFIGURATION,
4126 MGMT_STATUS_INVALID_PARAMS);
4127
4128 if (selected_phys == get_selected_phys(hdev))
4129 return mgmt_cmd_complete(sk, hdev->id,
4130 MGMT_OP_SET_PHY_CONFIGURATION,
4131 0, NULL, 0);
4132
4133 hci_dev_lock(hdev);
4134
4135 if (!hdev_is_powered(hdev)) {
4136 err = mgmt_cmd_status(sk, hdev->id,
4137 MGMT_OP_SET_PHY_CONFIGURATION,
4138 MGMT_STATUS_REJECTED);
4139 goto unlock;
4140 }
4141
4142 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4143 err = mgmt_cmd_status(sk, hdev->id,
4144 MGMT_OP_SET_PHY_CONFIGURATION,
4145 MGMT_STATUS_BUSY);
4146 goto unlock;
4147 }
4148
4149 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4150 pkt_type |= (HCI_DH3 | HCI_DM3);
4151 else
4152 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4153
4154 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4155 pkt_type |= (HCI_DH5 | HCI_DM5);
4156 else
4157 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4158
4159 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4160 pkt_type &= ~HCI_2DH1;
4161 else
4162 pkt_type |= HCI_2DH1;
4163
4164 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4165 pkt_type &= ~HCI_2DH3;
4166 else
4167 pkt_type |= HCI_2DH3;
4168
4169 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4170 pkt_type &= ~HCI_2DH5;
4171 else
4172 pkt_type |= HCI_2DH5;
4173
4174 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4175 pkt_type &= ~HCI_3DH1;
4176 else
4177 pkt_type |= HCI_3DH1;
4178
4179 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4180 pkt_type &= ~HCI_3DH3;
4181 else
4182 pkt_type |= HCI_3DH3;
4183
4184 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4185 pkt_type &= ~HCI_3DH5;
4186 else
4187 pkt_type |= HCI_3DH5;
4188
4189 if (pkt_type != hdev->pkt_type) {
4190 hdev->pkt_type = pkt_type;
4191 changed = true;
4192 }
4193
4194 if ((selected_phys & MGMT_PHY_LE_MASK) ==
4195 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4196 if (changed)
4197 mgmt_phy_configuration_changed(hdev, sk);
4198
4199 err = mgmt_cmd_complete(sk, hdev->id,
4200 MGMT_OP_SET_PHY_CONFIGURATION,
4201 0, NULL, 0);
4202
4203 goto unlock;
4204 }
4205
4206 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4207 len);
4208 if (!cmd)
4209 err = -ENOMEM;
4210 else
4211 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4212 set_default_phy_complete);
4213
4214 if (err < 0) {
4215 err = mgmt_cmd_status(sk, hdev->id,
4216 MGMT_OP_SET_PHY_CONFIGURATION,
4217 MGMT_STATUS_FAILED);
4218
4219 if (cmd)
4220 mgmt_pending_remove(cmd);
4221 }
4222
4223 unlock:
4224 hci_dev_unlock(hdev);
4225
4226 return err;
4227 }
4228
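/* Set Blocked Keys: replaces the device's blocked key list with the
* list supplied by userspace, so that matching keys can be rejected by
* the key-handling code elsewhere in the stack.
*/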
4229 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4230 u16 len)
4231 {
4232 int err = MGMT_STATUS_SUCCESS;
4233 struct mgmt_cp_set_blocked_keys *keys = data;
4234 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4235 sizeof(struct mgmt_blocked_key_info));
4236 u16 key_count, expected_len;
4237 int i;
4238
4239 bt_dev_dbg(hdev, "sock %p", sk);
4240
4241 key_count = __le16_to_cpu(keys->key_count);
4242 if (key_count > max_key_count) {
4243 bt_dev_err(hdev, "too big key_count value %u", key_count);
4244 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4245 MGMT_STATUS_INVALID_PARAMS);
4246 }
4247
4248 expected_len = struct_size(keys, keys, key_count);
4249 if (expected_len != len) {
4250 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4251 expected_len, len);
4252 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4253 MGMT_STATUS_INVALID_PARAMS);
4254 }
4255
4256 hci_dev_lock(hdev);
4257
4258 hci_blocked_keys_clear(hdev);
4259
4260 for (i = 0; i < key_count; ++i) {
4261 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4262
4263 if (!b) {
4264 err = MGMT_STATUS_NO_RESOURCES;
4265 break;
4266 }
4267
4268 b->type = keys->keys[i].type;
4269 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4270 list_add_rcu(&b->list, &hdev->blocked_keys);
4271 }
4272 hci_dev_unlock(hdev);
4273
4274 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4275 err, NULL, 0);
4276 }
4277
4278 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4279 void *data, u16 len)
4280 {
4281 struct mgmt_mode *cp = data;
4282 int err;
4283 bool changed = false;
4284
4285 bt_dev_dbg(hdev, "sock %p", sk);
4286
4287 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4288 return mgmt_cmd_status(sk, hdev->id,
4289 MGMT_OP_SET_WIDEBAND_SPEECH,
4290 MGMT_STATUS_NOT_SUPPORTED);
4291
4292 if (cp->val != 0x00 && cp->val != 0x01)
4293 return mgmt_cmd_status(sk, hdev->id,
4294 MGMT_OP_SET_WIDEBAND_SPEECH,
4295 MGMT_STATUS_INVALID_PARAMS);
4296
4297 hci_dev_lock(hdev);
4298
4299 if (hdev_is_powered(hdev) &&
4300 !!cp->val != hci_dev_test_flag(hdev,
4301 HCI_WIDEBAND_SPEECH_ENABLED)) {
4302 err = mgmt_cmd_status(sk, hdev->id,
4303 MGMT_OP_SET_WIDEBAND_SPEECH,
4304 MGMT_STATUS_REJECTED);
4305 goto unlock;
4306 }
4307
4308 if (cp->val)
4309 changed = !hci_dev_test_and_set_flag(hdev,
4310 HCI_WIDEBAND_SPEECH_ENABLED);
4311 else
4312 changed = hci_dev_test_and_clear_flag(hdev,
4313 HCI_WIDEBAND_SPEECH_ENABLED);
4314
4315 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4316 if (err < 0)
4317 goto unlock;
4318
4319 if (changed)
4320 err = new_settings(hdev, sk);
4321
4322 unlock:
4323 hci_dev_unlock(hdev);
4324 return err;
4325 }
4326
4327 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4328 void *data, u16 data_len)
4329 {
4330 char buf[20];
4331 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4332 u16 cap_len = 0;
4333 u8 flags = 0;
4334 u8 tx_power_range[2];
4335
4336 bt_dev_dbg(hdev, "sock %p", sk);
4337
4338 memset(&buf, 0, sizeof(buf));
4339
4340 hci_dev_lock(hdev);
4341
4342 /* When the Read Simple Pairing Options command is supported, then
4343 * remote public key validation is supported.
4344 *
4345 * Alternatively, when Microsoft extensions are available, they can
4346 * indicate support for public key validation as well.
4347 */
4348 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4349 flags |= 0x01; /* Remote public key validation (BR/EDR) */
4350
4351 flags |= 0x02; /* Remote public key validation (LE) */
4352
4353 /* When the Read Encryption Key Size command is supported, then the
4354 * encryption key size is enforced.
4355 */
4356 if (hdev->commands[20] & 0x10)
4357 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
4358
4359 flags |= 0x08; /* Encryption key size enforcement (LE) */
4360
4361 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4362 &flags, 1);
4363
4364 /* When the Read Simple Pairing Options command is supported, the
4365 * maximum encryption key size information is also provided.
4366 */
4367 if (hdev->commands[41] & 0x08)
4368 cap_len = eir_append_le16(rp->cap, cap_len,
4369 MGMT_CAP_MAX_ENC_KEY_SIZE,
4370 hdev->max_enc_key_size);
4371
4372 cap_len = eir_append_le16(rp->cap, cap_len,
4373 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4374 SMP_MAX_ENC_KEY_SIZE);
4375
4376 /* Append the min/max LE tx power parameters if we were able to fetch
4377 * them from the controller.
4378 */
4379 if (hdev->commands[38] & 0x80) {
4380 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4381 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4382 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4383 tx_power_range, 2);
4384 }
4385
4386 rp->cap_len = cpu_to_le16(cap_len);
4387
4388 hci_dev_unlock(hdev);
4389
4390 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4391 rp, sizeof(*rp) + cap_len);
4392 }
4393
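/* UUIDs identifying the experimental features below. The byte arrays
* are stored in little-endian order, i.e. reversed relative to the
* string form quoted above each definition.
*/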
4394 #ifdef CONFIG_BT_FEATURE_DEBUG
4395 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4396 static const u8 debug_uuid[16] = {
4397 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4398 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4399 };
4400 #endif
4401
4402 /* 330859bc-7506-492d-9370-9a6f0614037f */
4403 static const u8 quality_report_uuid[16] = {
4404 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4405 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4406 };
4407
4408 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4409 static const u8 offload_codecs_uuid[16] = {
4410 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4411 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4412 };
4413
4414 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4415 static const u8 le_simultaneous_roles_uuid[16] = {
4416 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4417 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4418 };
4419
4420 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
4421 static const u8 rpa_resolution_uuid[16] = {
4422 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
4423 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
4424 };
4425
4426 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4427 static const u8 iso_socket_uuid[16] = {
4428 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4429 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4430 };
4431
4432 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4433 static const u8 mgmt_mesh_uuid[16] = {
4434 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4435 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4436 };
4437
4438 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4439 void *data, u16 data_len)
4440 {
4441 struct mgmt_rp_read_exp_features_info *rp;
4442 size_t len;
4443 u16 idx = 0;
4444 u32 flags;
4445 int status;
4446
4447 bt_dev_dbg(hdev, "sock %p", sk);
4448
4449 /* Enough space for 7 features */
4450 len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4451 rp = kzalloc(len, GFP_KERNEL);
4452 if (!rp)
4453 return -ENOMEM;
4454
4455 #ifdef CONFIG_BT_FEATURE_DEBUG
4456 if (!hdev) {
4457 flags = bt_dbg_get() ? BIT(0) : 0;
4458
4459 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4460 rp->features[idx].flags = cpu_to_le32(flags);
4461 idx++;
4462 }
4463 #endif
4464
4465 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4466 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4467 flags = BIT(0);
4468 else
4469 flags = 0;
4470
4471 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4472 rp->features[idx].flags = cpu_to_le32(flags);
4473 idx++;
4474 }
4475
4476 if (hdev && ll_privacy_capable(hdev)) {
4477 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4478 flags = BIT(0) | BIT(1);
4479 else
4480 flags = BIT(1);
4481
4482 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4483 rp->features[idx].flags = cpu_to_le32(flags);
4484 idx++;
4485 }
4486
4487 if (hdev && (aosp_has_quality_report(hdev) ||
4488 hdev->set_quality_report)) {
4489 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4490 flags = BIT(0);
4491 else
4492 flags = 0;
4493
4494 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4495 rp->features[idx].flags = cpu_to_le32(flags);
4496 idx++;
4497 }
4498
4499 if (hdev && hdev->get_data_path_id) {
4500 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4501 flags = BIT(0);
4502 else
4503 flags = 0;
4504
4505 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4506 rp->features[idx].flags = cpu_to_le32(flags);
4507 idx++;
4508 }
4509
4510 if (IS_ENABLED(CONFIG_BT_LE)) {
4511 flags = iso_enabled() ? BIT(0) : 0;
4512 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4513 rp->features[idx].flags = cpu_to_le32(flags);
4514 idx++;
4515 }
4516
4517 if (hdev && lmp_le_capable(hdev)) {
4518 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4519 flags = BIT(0);
4520 else
4521 flags = 0;
4522
4523 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4524 rp->features[idx].flags = cpu_to_le32(flags);
4525 idx++;
4526 }
4527
4528 rp->feature_count = cpu_to_le16(idx);
4529
4530 /* After reading the experimental features information, enable
4531 * the events to update the client on any future change.
4532 */
4533 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4534
4535 status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4536 MGMT_OP_READ_EXP_FEATURES_INFO,
4537 0, rp, sizeof(*rp) + (20 * idx));
4538
4539 kfree(rp);
4540 return status;
4541 }
4542
4543 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4544 struct sock *skip)
4545 {
4546 struct mgmt_ev_exp_feature_changed ev;
4547
4548 memset(&ev, 0, sizeof(ev));
4549 memcpy(ev.uuid, rpa_resolution_uuid, 16);
4550 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4551
4552 /* TODO: Determine whether the conn_flags update needs to be atomic. */
4553 if (enabled && privacy_mode_capable(hdev))
4554 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4555 else
4556 hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4557
4558 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4559 &ev, sizeof(ev),
4560 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4561
4562 }
4563
4564 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4565 bool enabled, struct sock *skip)
4566 {
4567 struct mgmt_ev_exp_feature_changed ev;
4568
4569 memset(&ev, 0, sizeof(ev));
4570 memcpy(ev.uuid, uuid, 16);
4571 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4572
4573 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4574 &ev, sizeof(ev),
4575 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4576 }
4577
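/* Helper for building an entry in the experimental feature dispatch
* table below; EXP_FEAT(debug_uuid, set_debug_func) expands to
* { .uuid = debug_uuid, .set_func = set_debug_func }.
*/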
4578 #define EXP_FEAT(_uuid, _set_func) \
4579 { \
4580 .uuid = _uuid, \
4581 .set_func = _set_func, \
4582 }
4583
4584 /* The zero key uuid is special. Multiple exp features are set through it. */
4585 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4586 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4587 {
4588 struct mgmt_rp_set_exp_feature rp;
4589
4590 memset(rp.uuid, 0, 16);
4591 rp.flags = cpu_to_le32(0);
4592
4593 #ifdef CONFIG_BT_FEATURE_DEBUG
4594 if (!hdev) {
4595 bool changed = bt_dbg_get();
4596
4597 bt_dbg_set(false);
4598
4599 if (changed)
4600 exp_feature_changed(NULL, ZERO_KEY, false, sk);
4601 }
4602 #endif
4603
4604 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4605 bool changed;
4606
4607 changed = hci_dev_test_and_clear_flag(hdev,
4608 HCI_ENABLE_LL_PRIVACY);
4609 if (changed)
4610 exp_feature_changed(hdev, rpa_resolution_uuid, false,
4611 sk);
4612 }
4613
4614 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4615
4616 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4617 MGMT_OP_SET_EXP_FEATURE, 0,
4618 &rp, sizeof(rp));
4619 }
4620
4621 #ifdef CONFIG_BT_FEATURE_DEBUG
4622 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4623 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4624 {
4625 struct mgmt_rp_set_exp_feature rp;
4626
4627 bool val, changed;
4628 int err;
4629
4630 /* Command requires the use of the non-controller index */
4631 if (hdev)
4632 return mgmt_cmd_status(sk, hdev->id,
4633 MGMT_OP_SET_EXP_FEATURE,
4634 MGMT_STATUS_INVALID_INDEX);
4635
4636 /* Parameters are limited to a single octet */
4637 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4638 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4639 MGMT_OP_SET_EXP_FEATURE,
4640 MGMT_STATUS_INVALID_PARAMS);
4641
4642 /* Only boolean on/off is supported */
4643 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4644 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4645 MGMT_OP_SET_EXP_FEATURE,
4646 MGMT_STATUS_INVALID_PARAMS);
4647
4648 val = !!cp->param[0];
4649 changed = val ? !bt_dbg_get() : bt_dbg_get();
4650 bt_dbg_set(val);
4651
4652 memcpy(rp.uuid, debug_uuid, 16);
4653 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4654
4655 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4656
4657 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4658 MGMT_OP_SET_EXP_FEATURE, 0,
4659 &rp, sizeof(rp));
4660
4661 if (changed)
4662 exp_feature_changed(hdev, debug_uuid, val, sk);
4663
4664 return err;
4665 }
4666 #endif
4667
4668 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4669 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4670 {
4671 struct mgmt_rp_set_exp_feature rp;
4672 bool val, changed;
4673 int err;
4674
4675 /* Command requires the use of the controller index */
4676 if (!hdev)
4677 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4678 MGMT_OP_SET_EXP_FEATURE,
4679 MGMT_STATUS_INVALID_INDEX);
4680
4681 /* Parameters are limited to a single octet */
4682 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4683 return mgmt_cmd_status(sk, hdev->id,
4684 MGMT_OP_SET_EXP_FEATURE,
4685 MGMT_STATUS_INVALID_PARAMS);
4686
4687 /* Only boolean on/off is supported */
4688 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4689 return mgmt_cmd_status(sk, hdev->id,
4690 MGMT_OP_SET_EXP_FEATURE,
4691 MGMT_STATUS_INVALID_PARAMS);
4692
4693 val = !!cp->param[0];
4694
4695 if (val) {
4696 changed = !hci_dev_test_and_set_flag(hdev,
4697 HCI_MESH_EXPERIMENTAL);
4698 } else {
4699 hci_dev_clear_flag(hdev, HCI_MESH);
4700 changed = hci_dev_test_and_clear_flag(hdev,
4701 HCI_MESH_EXPERIMENTAL);
4702 }
4703
4704 memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4705 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4706
4707 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4708
4709 err = mgmt_cmd_complete(sk, hdev->id,
4710 MGMT_OP_SET_EXP_FEATURE, 0,
4711 &rp, sizeof(rp));
4712
4713 if (changed)
4714 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4715
4716 return err;
4717 }
4718
4719 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4720 struct mgmt_cp_set_exp_feature *cp,
4721 u16 data_len)
4722 {
4723 struct mgmt_rp_set_exp_feature rp;
4724 bool val, changed;
4725 int err;
4726 u32 flags;
4727
4728 /* Command requires the use of the controller index */
4729 if (!hdev)
4730 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4731 MGMT_OP_SET_EXP_FEATURE,
4732 MGMT_STATUS_INVALID_INDEX);
4733
4734 /* Changes can only be made when the controller is powered down */
4735 if (hdev_is_powered(hdev))
4736 return mgmt_cmd_status(sk, hdev->id,
4737 MGMT_OP_SET_EXP_FEATURE,
4738 MGMT_STATUS_REJECTED);
4739
4740 /* Parameters are limited to a single octet */
4741 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4742 return mgmt_cmd_status(sk, hdev->id,
4743 MGMT_OP_SET_EXP_FEATURE,
4744 MGMT_STATUS_INVALID_PARAMS);
4745
4746 /* Only boolean on/off is supported */
4747 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4748 return mgmt_cmd_status(sk, hdev->id,
4749 MGMT_OP_SET_EXP_FEATURE,
4750 MGMT_STATUS_INVALID_PARAMS);
4751
4752 val = !!cp->param[0];
4753
4754 if (val) {
4755 changed = !hci_dev_test_and_set_flag(hdev,
4756 HCI_ENABLE_LL_PRIVACY);
4757 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4758
4759 /* Enable LL privacy + supported settings changed */
4760 flags = BIT(0) | BIT(1);
4761 } else {
4762 changed = hci_dev_test_and_clear_flag(hdev,
4763 HCI_ENABLE_LL_PRIVACY);
4764
4765 /* Disable LL privacy + supported settings changed */
4766 flags = BIT(1);
4767 }
4768
4769 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4770 rp.flags = cpu_to_le32(flags);
4771
4772 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4773
4774 err = mgmt_cmd_complete(sk, hdev->id,
4775 MGMT_OP_SET_EXP_FEATURE, 0,
4776 &rp, sizeof(rp));
4777
4778 if (changed)
4779 exp_ll_privacy_feature_changed(val, hdev, sk);
4780
4781 return err;
4782 }
4783
4784 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4785 struct mgmt_cp_set_exp_feature *cp,
4786 u16 data_len)
4787 {
4788 struct mgmt_rp_set_exp_feature rp;
4789 bool val, changed;
4790 int err;
4791
4792 /* Command requires the use of a valid controller index */
4793 if (!hdev)
4794 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4795 MGMT_OP_SET_EXP_FEATURE,
4796 MGMT_STATUS_INVALID_INDEX);
4797
4798 /* Parameters are limited to a single octet */
4799 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4800 return mgmt_cmd_status(sk, hdev->id,
4801 MGMT_OP_SET_EXP_FEATURE,
4802 MGMT_STATUS_INVALID_PARAMS);
4803
4804 /* Only boolean on/off is supported */
4805 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4806 return mgmt_cmd_status(sk, hdev->id,
4807 MGMT_OP_SET_EXP_FEATURE,
4808 MGMT_STATUS_INVALID_PARAMS);
4809
4810 hci_req_sync_lock(hdev);
4811
4812 val = !!cp->param[0];
4813 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4814
4815 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4816 err = mgmt_cmd_status(sk, hdev->id,
4817 MGMT_OP_SET_EXP_FEATURE,
4818 MGMT_STATUS_NOT_SUPPORTED);
4819 goto unlock_quality_report;
4820 }
4821
4822 if (changed) {
4823 if (hdev->set_quality_report)
4824 err = hdev->set_quality_report(hdev, val);
4825 else
4826 err = aosp_set_quality_report(hdev, val);
4827
4828 if (err) {
4829 err = mgmt_cmd_status(sk, hdev->id,
4830 MGMT_OP_SET_EXP_FEATURE,
4831 MGMT_STATUS_FAILED);
4832 goto unlock_quality_report;
4833 }
4834
4835 if (val)
4836 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4837 else
4838 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4839 }
4840
4841 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4842
4843 memcpy(rp.uuid, quality_report_uuid, 16);
4844 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4845 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4846
4847 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4848 &rp, sizeof(rp));
4849
4850 if (changed)
4851 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4852
4853 unlock_quality_report:
4854 hci_req_sync_unlock(hdev);
4855 return err;
4856 }
4857
4858 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4859 struct mgmt_cp_set_exp_feature *cp,
4860 u16 data_len)
4861 {
4862 bool val, changed;
4863 int err;
4864 struct mgmt_rp_set_exp_feature rp;
4865
4866 /* Command requires the use of a valid controller index */
4867 if (!hdev)
4868 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4869 MGMT_OP_SET_EXP_FEATURE,
4870 MGMT_STATUS_INVALID_INDEX);
4871
4872 /* Parameters are limited to a single octet */
4873 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4874 return mgmt_cmd_status(sk, hdev->id,
4875 MGMT_OP_SET_EXP_FEATURE,
4876 MGMT_STATUS_INVALID_PARAMS);
4877
4878 /* Only boolean on/off is supported */
4879 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4880 return mgmt_cmd_status(sk, hdev->id,
4881 MGMT_OP_SET_EXP_FEATURE,
4882 MGMT_STATUS_INVALID_PARAMS);
4883
4884 val = !!cp->param[0];
4885 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4886
4887 if (!hdev->get_data_path_id) {
4888 return mgmt_cmd_status(sk, hdev->id,
4889 MGMT_OP_SET_EXP_FEATURE,
4890 MGMT_STATUS_NOT_SUPPORTED);
4891 }
4892
4893 if (changed) {
4894 if (val)
4895 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4896 else
4897 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4898 }
4899
4900 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4901 val, changed);
4902
4903 memcpy(rp.uuid, offload_codecs_uuid, 16);
4904 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4905 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4906 err = mgmt_cmd_complete(sk, hdev->id,
4907 MGMT_OP_SET_EXP_FEATURE, 0,
4908 &rp, sizeof(rp));
4909
4910 if (changed)
4911 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4912
4913 return err;
4914 }
4915
4916 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4917 struct mgmt_cp_set_exp_feature *cp,
4918 u16 data_len)
4919 {
4920 bool val, changed;
4921 int err;
4922 struct mgmt_rp_set_exp_feature rp;
4923
4924 /* Command requires the use of a valid controller index */
4925 if (!hdev)
4926 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4927 MGMT_OP_SET_EXP_FEATURE,
4928 MGMT_STATUS_INVALID_INDEX);
4929
4930 /* Parameters are limited to a single octet */
4931 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4932 return mgmt_cmd_status(sk, hdev->id,
4933 MGMT_OP_SET_EXP_FEATURE,
4934 MGMT_STATUS_INVALID_PARAMS);
4935
4936 /* Only boolean on/off is supported */
4937 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4938 return mgmt_cmd_status(sk, hdev->id,
4939 MGMT_OP_SET_EXP_FEATURE,
4940 MGMT_STATUS_INVALID_PARAMS);
4941
4942 val = !!cp->param[0];
4943 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4944
4945 if (!hci_dev_le_state_simultaneous(hdev)) {
4946 return mgmt_cmd_status(sk, hdev->id,
4947 MGMT_OP_SET_EXP_FEATURE,
4948 MGMT_STATUS_NOT_SUPPORTED);
4949 }
4950
4951 if (changed) {
4952 if (val)
4953 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4954 else
4955 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4956 }
4957
4958 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4959 val, changed);
4960
4961 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4962 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4963 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4964 err = mgmt_cmd_complete(sk, hdev->id,
4965 MGMT_OP_SET_EXP_FEATURE, 0,
4966 &rp, sizeof(rp));
4967
4968 if (changed)
4969 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4970
4971 return err;
4972 }
4973
4974 #ifdef CONFIG_BT_LE
4975 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4976 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4977 {
4978 struct mgmt_rp_set_exp_feature rp;
4979 bool val, changed = false;
4980 int err;
4981
4982 /* Command requires the use of the non-controller index */
4983 if (hdev)
4984 return mgmt_cmd_status(sk, hdev->id,
4985 MGMT_OP_SET_EXP_FEATURE,
4986 MGMT_STATUS_INVALID_INDEX);
4987
4988 /* Parameters are limited to a single octet */
4989 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4990 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4991 MGMT_OP_SET_EXP_FEATURE,
4992 MGMT_STATUS_INVALID_PARAMS);
4993
4994 /* Only boolean on/off is supported */
4995 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4996 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4997 MGMT_OP_SET_EXP_FEATURE,
4998 MGMT_STATUS_INVALID_PARAMS);
4999
5000 val = !!cp->param[0];
5001 if (val)
5002 err = iso_init();
5003 else
5004 err = iso_exit();
5005
5006 if (!err)
5007 changed = true;
5008
5009 memcpy(rp.uuid, iso_socket_uuid, 16);
5010 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
5011
5012 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
5013
5014 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
5015 MGMT_OP_SET_EXP_FEATURE, 0,
5016 &rp, sizeof(rp));
5017
5018 if (changed)
5019 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
5020
5021 return err;
5022 }
5023 #endif
5024
5025 static const struct mgmt_exp_feature {
5026 const u8 *uuid;
5027 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
5028 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
5029 } exp_features[] = {
5030 EXP_FEAT(ZERO_KEY, set_zero_key_func),
5031 #ifdef CONFIG_BT_FEATURE_DEBUG
5032 EXP_FEAT(debug_uuid, set_debug_func),
5033 #endif
5034 EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
5035 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
5036 EXP_FEAT(quality_report_uuid, set_quality_report_func),
5037 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
5038 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
5039 #ifdef CONFIG_BT_LE
5040 EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
5041 #endif
5042
5043 /* end with a null feature */
5044 EXP_FEAT(NULL, NULL)
5045 };
5046
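/* Dispatch a Set Experimental Feature command to the matching handler
* by comparing the 16-byte UUID against the table above; unknown UUIDs
* are rejected as not supported.
*/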
5047 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
5048 void *data, u16 data_len)
5049 {
5050 struct mgmt_cp_set_exp_feature *cp = data;
5051 size_t i = 0;
5052
5053 bt_dev_dbg(hdev, "sock %p", sk);
5054
5055 for (i = 0; exp_features[i].uuid; i++) {
5056 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
5057 return exp_features[i].set_func(sk, hdev, cp, data_len);
5058 }
5059
5060 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
5061 MGMT_OP_SET_EXP_FEATURE,
5062 MGMT_STATUS_NOT_SUPPORTED);
5063 }
5064
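/* Compute the connection flags actually supported for a given LE
* connection parameter entry, starting from hdev->conn_flags and
* masking out flags the device cannot use (see the RPA restriction
* below).
*/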
5065 static u32 get_params_flags(struct hci_dev *hdev,
5066 struct hci_conn_params *params)
5067 {
5068 u32 flags = hdev->conn_flags;
5069
5070 /* Devices using RPAs can only be programmed into the accept list if
5071 * LL Privacy has been enabled; otherwise they cannot use
5072 * HCI_CONN_FLAG_REMOTE_WAKEUP.
5073 */
5074 if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5075 hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
5076 flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
5077
5078 return flags;
5079 }
5080
5081 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5082 u16 data_len)
5083 {
5084 struct mgmt_cp_get_device_flags *cp = data;
5085 struct mgmt_rp_get_device_flags rp;
5086 struct bdaddr_list_with_flags *br_params;
5087 struct hci_conn_params *params;
5088 u32 supported_flags;
5089 u32 current_flags = 0;
5090 u8 status = MGMT_STATUS_INVALID_PARAMS;
5091
5092 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
5093 &cp->addr.bdaddr, cp->addr.type);
5094
5095 hci_dev_lock(hdev);
5096
5097 supported_flags = hdev->conn_flags;
5098
5099 memset(&rp, 0, sizeof(rp));
5100
5101 if (cp->addr.type == BDADDR_BREDR) {
5102 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5103 &cp->addr.bdaddr,
5104 cp->addr.type);
5105 if (!br_params)
5106 goto done;
5107
5108 current_flags = br_params->flags;
5109 } else {
5110 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5111 le_addr_type(cp->addr.type));
5112 if (!params)
5113 goto done;
5114
5115 supported_flags = get_params_flags(hdev, params);
5116 current_flags = params->flags;
5117 }
5118
5119 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5120 rp.addr.type = cp->addr.type;
5121 rp.supported_flags = cpu_to_le32(supported_flags);
5122 rp.current_flags = cpu_to_le32(current_flags);
5123
5124 status = MGMT_STATUS_SUCCESS;
5125
5126 done:
5127 hci_dev_unlock(hdev);
5128
5129 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5130 &rp, sizeof(rp));
5131 }
5132
5133 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5134 bdaddr_t *bdaddr, u8 bdaddr_type,
5135 u32 supported_flags, u32 current_flags)
5136 {
5137 struct mgmt_ev_device_flags_changed ev;
5138
5139 bacpy(&ev.addr.bdaddr, bdaddr);
5140 ev.addr.type = bdaddr_type;
5141 ev.supported_flags = cpu_to_le32(supported_flags);
5142 ev.current_flags = cpu_to_le32(current_flags);
5143
5144 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5145 }
5146
5147 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5148 u16 len)
5149 {
5150 struct mgmt_cp_set_device_flags *cp = data;
5151 struct bdaddr_list_with_flags *br_params;
5152 struct hci_conn_params *params;
5153 u8 status = MGMT_STATUS_INVALID_PARAMS;
5154 u32 supported_flags;
5155 u32 current_flags = __le32_to_cpu(cp->current_flags);
5156
5157 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5158 &cp->addr.bdaddr, cp->addr.type, current_flags);
5159
5160 /* TODO: Consider taking hci_dev_lock() earlier; conn_flags can change. */
5161 supported_flags = hdev->conn_flags;
5162
5163 if ((supported_flags | current_flags) != supported_flags) {
5164 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5165 current_flags, supported_flags);
5166 goto done;
5167 }
5168
5169 hci_dev_lock(hdev);
5170
5171 if (cp->addr.type == BDADDR_BREDR) {
5172 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5173 &cp->addr.bdaddr,
5174 cp->addr.type);
5175
5176 if (br_params) {
5177 br_params->flags = current_flags;
5178 status = MGMT_STATUS_SUCCESS;
5179 } else {
5180 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5181 &cp->addr.bdaddr, cp->addr.type);
5182 }
5183
5184 goto unlock;
5185 }
5186
5187 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5188 le_addr_type(cp->addr.type));
5189 if (!params) {
5190 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5191 &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5192 goto unlock;
5193 }
5194
5195 supported_flags = get_params_flags(hdev, params);
5196
5197 if ((supported_flags | current_flags) != supported_flags) {
5198 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5199 current_flags, supported_flags);
5200 goto unlock;
5201 }
5202
5203 WRITE_ONCE(params->flags, current_flags);
5204 status = MGMT_STATUS_SUCCESS;
5205
5206 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5207 * has been set.
5208 */
5209 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5210 hci_update_passive_scan(hdev);
5211
5212 unlock:
5213 hci_dev_unlock(hdev);
5214
5215 done:
5216 if (status == MGMT_STATUS_SUCCESS)
5217 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5218 supported_flags, current_flags);
5219
5220 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5221 &cp->addr, sizeof(cp->addr));
5222 }
5223
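/* Send MGMT_EV_ADV_MONITOR_ADDED to all mgmt sockets except the one
* that issued the command.
*/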
5224 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5225 u16 handle)
5226 {
5227 struct mgmt_ev_adv_monitor_added ev;
5228
5229 ev.monitor_handle = cpu_to_le16(handle);
5230
5231 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5232 }
5233
5234 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5235 {
5236 struct mgmt_ev_adv_monitor_removed ev;
5237 struct mgmt_pending_cmd *cmd;
5238 struct sock *sk_skip = NULL;
5239 struct mgmt_cp_remove_adv_monitor *cp;
5240
5241 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5242 if (cmd) {
5243 cp = cmd->param;
5244
5245 if (cp->monitor_handle)
5246 sk_skip = cmd->sk;
5247 }
5248
5249 ev.monitor_handle = cpu_to_le16(handle);
5250
5251 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
5252 }
5253
5254 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5255 void *data, u16 len)
5256 {
5257 struct adv_monitor *monitor = NULL;
5258 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5259 int handle, err;
5260 size_t rp_size = 0;
5261 __u32 supported = 0;
5262 __u32 enabled = 0;
5263 __u16 num_handles = 0;
5264 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5265
5266 BT_DBG("request for %s", hdev->name);
5267
5268 hci_dev_lock(hdev);
5269
5270 if (msft_monitor_supported(hdev))
5271 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5272
5273 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5274 handles[num_handles++] = monitor->handle;
5275
5276 hci_dev_unlock(hdev);
5277
5278 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5279 rp = kmalloc(rp_size, GFP_KERNEL);
5280 if (!rp)
5281 return -ENOMEM;
5282
5283 /* All supported features are currently enabled */
5284 enabled = supported;
5285
5286 rp->supported_features = cpu_to_le32(supported);
5287 rp->enabled_features = cpu_to_le32(enabled);
5288 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5289 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5290 rp->num_handles = cpu_to_le16(num_handles);
5291 if (num_handles)
5292 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5293
5294 err = mgmt_cmd_complete(sk, hdev->id,
5295 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5296 MGMT_STATUS_SUCCESS, rp, rp_size);
5297
5298 kfree(rp);
5299
5300 return err;
5301 }
5302
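/* Completion handler for Add Adv Patterns Monitor: on success the
* monitor is marked as registered, the monitor count is bumped and the
* passive scan is re-evaluated before the reply is sent.
*/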
5303 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5304 void *data, int status)
5305 {
5306 struct mgmt_rp_add_adv_patterns_monitor rp;
5307 struct mgmt_pending_cmd *cmd = data;
5308 struct adv_monitor *monitor = cmd->user_data;
5309
5310 hci_dev_lock(hdev);
5311
5312 rp.monitor_handle = cpu_to_le16(monitor->handle);
5313
5314 if (!status) {
5315 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5316 hdev->adv_monitors_cnt++;
5317 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5318 monitor->state = ADV_MONITOR_STATE_REGISTERED;
5319 hci_update_passive_scan(hdev);
5320 }
5321
5322 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5323 mgmt_status(status), &rp, sizeof(rp));
5324 mgmt_pending_remove(cmd);
5325
5326 hci_dev_unlock(hdev);
5327 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5328 rp.monitor_handle, status);
5329 }
5330
5331 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5332 {
5333 struct mgmt_pending_cmd *cmd = data;
5334 struct adv_monitor *monitor = cmd->user_data;
5335
5336 return hci_add_adv_monitor(hdev, monitor);
5337 }
5338
5339 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5340 struct adv_monitor *m, u8 status,
5341 void *data, u16 len, u16 op)
5342 {
5343 struct mgmt_pending_cmd *cmd;
5344 int err;
5345
5346 hci_dev_lock(hdev);
5347
5348 if (status)
5349 goto unlock;
5350
5351 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5352 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5353 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5354 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5355 status = MGMT_STATUS_BUSY;
5356 goto unlock;
5357 }
5358
5359 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5360 if (!cmd) {
5361 status = MGMT_STATUS_NO_RESOURCES;
5362 goto unlock;
5363 }
5364
5365 cmd->user_data = m;
5366 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5367 mgmt_add_adv_patterns_monitor_complete);
5368 if (err) {
5369 if (err == -ENOMEM)
5370 status = MGMT_STATUS_NO_RESOURCES;
5371 else
5372 status = MGMT_STATUS_FAILED;
5373
5374 goto unlock;
5375 }
5376
5377 hci_dev_unlock(hdev);
5378
5379 return 0;
5380
5381 unlock:
5382 hci_free_adv_monitor(hdev, m);
5383 hci_dev_unlock(hdev);
5384 return mgmt_cmd_status(sk, hdev->id, op, status);
5385 }
5386
5387 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5388 struct mgmt_adv_rssi_thresholds *rssi)
5389 {
5390 if (rssi) {
5391 m->rssi.low_threshold = rssi->low_threshold;
5392 m->rssi.low_threshold_timeout =
5393 __le16_to_cpu(rssi->low_threshold_timeout);
5394 m->rssi.high_threshold = rssi->high_threshold;
5395 m->rssi.high_threshold_timeout =
5396 __le16_to_cpu(rssi->high_threshold_timeout);
5397 m->rssi.sampling_period = rssi->sampling_period;
5398 } else {
5399 /* Default values. These numbers are the least constricting
5400 * parameters for the MSFT API to work, so it behaves as if there
5401 * are no RSSI parameters to consider. They may need to be changed
5402 * if other APIs are to be supported.
5403 */
5404 m->rssi.low_threshold = -127;
5405 m->rssi.low_threshold_timeout = 60;
5406 m->rssi.high_threshold = -127;
5407 m->rssi.high_threshold_timeout = 0;
5408 m->rssi.sampling_period = 0;
5409 }
5410 }
5411
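/* Validate and copy the monitor patterns supplied by userspace; each
* offset/length pair must fit within HCI_MAX_EXT_AD_LENGTH. Returns a
* MGMT status code.
*/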
5412 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5413 struct mgmt_adv_pattern *patterns)
5414 {
5415 u8 offset = 0, length = 0;
5416 struct adv_pattern *p = NULL;
5417 int i;
5418
5419 for (i = 0; i < pattern_count; i++) {
5420 offset = patterns[i].offset;
5421 length = patterns[i].length;
5422 if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5423 length > HCI_MAX_EXT_AD_LENGTH ||
5424 (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5425 return MGMT_STATUS_INVALID_PARAMS;
5426
5427 p = kmalloc(sizeof(*p), GFP_KERNEL);
5428 if (!p)
5429 return MGMT_STATUS_NO_RESOURCES;
5430
5431 p->ad_type = patterns[i].ad_type;
5432 p->offset = patterns[i].offset;
5433 p->length = patterns[i].length;
5434 memcpy(p->value, patterns[i].value, p->length);
5435
5436 INIT_LIST_HEAD(&p->list);
5437 list_add(&p->list, &m->patterns);
5438 }
5439
5440 return MGMT_STATUS_SUCCESS;
5441 }
5442
5443 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5444 void *data, u16 len)
5445 {
5446 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5447 struct adv_monitor *m = NULL;
5448 u8 status = MGMT_STATUS_SUCCESS;
5449 size_t expected_size = sizeof(*cp);
5450
5451 BT_DBG("request for %s", hdev->name);
5452
5453 if (len <= sizeof(*cp)) {
5454 status = MGMT_STATUS_INVALID_PARAMS;
5455 goto done;
5456 }
5457
5458 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5459 if (len != expected_size) {
5460 status = MGMT_STATUS_INVALID_PARAMS;
5461 goto done;
5462 }
5463
5464 m = kzalloc(sizeof(*m), GFP_KERNEL);
5465 if (!m) {
5466 status = MGMT_STATUS_NO_RESOURCES;
5467 goto done;
5468 }
5469
5470 INIT_LIST_HEAD(&m->patterns);
5471
5472 parse_adv_monitor_rssi(m, NULL);
5473 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5474
5475 done:
5476 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5477 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5478 }
5479
5480 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5481 void *data, u16 len)
5482 {
5483 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5484 struct adv_monitor *m = NULL;
5485 u8 status = MGMT_STATUS_SUCCESS;
5486 size_t expected_size = sizeof(*cp);
5487
5488 BT_DBG("request for %s", hdev->name);
5489
5490 if (len <= sizeof(*cp)) {
5491 status = MGMT_STATUS_INVALID_PARAMS;
5492 goto done;
5493 }
5494
5495 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5496 if (len != expected_size) {
5497 status = MGMT_STATUS_INVALID_PARAMS;
5498 goto done;
5499 }
5500
5501 m = kzalloc(sizeof(*m), GFP_KERNEL);
5502 if (!m) {
5503 status = MGMT_STATUS_NO_RESOURCES;
5504 goto done;
5505 }
5506
5507 INIT_LIST_HEAD(&m->patterns);
5508
5509 parse_adv_monitor_rssi(m, &cp->rssi);
5510 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5511
5512 done:
5513 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5514 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5515 }
5516
5517 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5518 void *data, int status)
5519 {
5520 struct mgmt_rp_remove_adv_monitor rp;
5521 struct mgmt_pending_cmd *cmd = data;
5522 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5523
5524 hci_dev_lock(hdev);
5525
5526 rp.monitor_handle = cp->monitor_handle;
5527
5528 if (!status)
5529 hci_update_passive_scan(hdev);
5530
5531 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5532 mgmt_status(status), &rp, sizeof(rp));
5533 mgmt_pending_remove(cmd);
5534
5535 hci_dev_unlock(hdev);
5536 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5537 rp.monitor_handle, status);
5538 }
5539
5540 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5541 {
5542 struct mgmt_pending_cmd *cmd = data;
5543 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5544 u16 handle = __le16_to_cpu(cp->monitor_handle);
5545
5546 if (!handle)
5547 return hci_remove_all_adv_monitor(hdev);
5548
5549 return hci_remove_single_adv_monitor(hdev, handle);
5550 }
5551
5552 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5553 void *data, u16 len)
5554 {
5555 struct mgmt_pending_cmd *cmd;
5556 int err, status;
5557
5558 hci_dev_lock(hdev);
5559
5560 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5561 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5562 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5563 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5564 status = MGMT_STATUS_BUSY;
5565 goto unlock;
5566 }
5567
5568 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5569 if (!cmd) {
5570 status = MGMT_STATUS_NO_RESOURCES;
5571 goto unlock;
5572 }
5573
5574 err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
5575 mgmt_remove_adv_monitor_complete);
5576
5577 if (err) {
5578 mgmt_pending_remove(cmd);
5579
5580 if (err == -ENOMEM)
5581 status = MGMT_STATUS_NO_RESOURCES;
5582 else
5583 status = MGMT_STATUS_FAILED;
5584
5585 goto unlock;
5586 }
5587
5588 hci_dev_unlock(hdev);
5589
5590 return 0;
5591
5592 unlock:
5593 hci_dev_unlock(hdev);
5594 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5595 status);
5596 }
5597
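/* Completion handler for Read Local OOB Data. Depending on whether
* BR/EDR Secure Connections is enabled, the controller reply carries
* either P-192 data only or both P-192 and P-256 data; the mgmt reply
* is trimmed accordingly.
*/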
5598 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5599 {
5600 struct mgmt_rp_read_local_oob_data mgmt_rp;
5601 size_t rp_size = sizeof(mgmt_rp);
5602 struct mgmt_pending_cmd *cmd = data;
5603 struct sk_buff *skb = cmd->skb;
5604 u8 status = mgmt_status(err);
5605
5606 if (!status) {
5607 if (!skb)
5608 status = MGMT_STATUS_FAILED;
5609 else if (IS_ERR(skb))
5610 status = mgmt_status(PTR_ERR(skb));
5611 else
5612 status = mgmt_status(skb->data[0]);
5613 }
5614
5615 bt_dev_dbg(hdev, "status %d", status);
5616
5617 if (status) {
5618 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5619 goto remove;
5620 }
5621
5622 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5623
5624 if (!bredr_sc_enabled(hdev)) {
5625 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5626
5627 if (skb->len < sizeof(*rp)) {
5628 mgmt_cmd_status(cmd->sk, hdev->id,
5629 MGMT_OP_READ_LOCAL_OOB_DATA,
5630 MGMT_STATUS_FAILED);
5631 goto remove;
5632 }
5633
5634 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5635 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5636
5637 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5638 } else {
5639 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5640
5641 if (skb->len < sizeof(*rp)) {
5642 mgmt_cmd_status(cmd->sk, hdev->id,
5643 MGMT_OP_READ_LOCAL_OOB_DATA,
5644 MGMT_STATUS_FAILED);
5645 goto remove;
5646 }
5647
5648 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5649 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5650
5651 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5652 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5653 }
5654
5655 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5656 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5657
5658 remove:
5659 if (skb && !IS_ERR(skb))
5660 kfree_skb(skb);
5661
5662 mgmt_pending_free(cmd);
5663 }
5664
5665 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5666 {
5667 struct mgmt_pending_cmd *cmd = data;
5668
5669 if (bredr_sc_enabled(hdev))
5670 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5671 else
5672 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5673
5674 if (IS_ERR(cmd->skb))
5675 return PTR_ERR(cmd->skb);
5676 else
5677 return 0;
5678 }
5679
5680 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5681 void *data, u16 data_len)
5682 {
5683 struct mgmt_pending_cmd *cmd;
5684 int err;
5685
5686 bt_dev_dbg(hdev, "sock %p", sk);
5687
5688 hci_dev_lock(hdev);
5689
5690 if (!hdev_is_powered(hdev)) {
5691 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5692 MGMT_STATUS_NOT_POWERED);
5693 goto unlock;
5694 }
5695
5696 if (!lmp_ssp_capable(hdev)) {
5697 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5698 MGMT_STATUS_NOT_SUPPORTED);
5699 goto unlock;
5700 }
5701
5702 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5703 if (!cmd)
5704 err = -ENOMEM;
5705 else
5706 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5707 read_local_oob_data_complete);
5708
5709 if (err < 0) {
5710 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5711 MGMT_STATUS_FAILED);
5712
5713 if (cmd)
5714 mgmt_pending_free(cmd);
5715 }
5716
5717 unlock:
5718 hci_dev_unlock(hdev);
5719 return err;
5720 }
5721
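/* Two request sizes are accepted: the legacy layout carrying only the
 * P-192 hash/randomizer and the extended layout carrying both P-192 and
 * P-256 values. An all-zero hash/randomizer pair disables OOB data for
 * that curve, and LE addresses must carry zeroed P-192 values since
 * legacy SMP OOB is not implemented.
 */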
5722 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5723 void *data, u16 len)
5724 {
5725 struct mgmt_addr_info *addr = data;
5726 int err;
5727
5728 bt_dev_dbg(hdev, "sock %p", sk);
5729
5730 if (!bdaddr_type_is_valid(addr->type))
5731 return mgmt_cmd_complete(sk, hdev->id,
5732 MGMT_OP_ADD_REMOTE_OOB_DATA,
5733 MGMT_STATUS_INVALID_PARAMS,
5734 addr, sizeof(*addr));
5735
5736 hci_dev_lock(hdev);
5737
5738 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5739 struct mgmt_cp_add_remote_oob_data *cp = data;
5740 u8 status;
5741
5742 if (cp->addr.type != BDADDR_BREDR) {
5743 err = mgmt_cmd_complete(sk, hdev->id,
5744 MGMT_OP_ADD_REMOTE_OOB_DATA,
5745 MGMT_STATUS_INVALID_PARAMS,
5746 &cp->addr, sizeof(cp->addr));
5747 goto unlock;
5748 }
5749
5750 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5751 cp->addr.type, cp->hash,
5752 cp->rand, NULL, NULL);
5753 if (err < 0)
5754 status = MGMT_STATUS_FAILED;
5755 else
5756 status = MGMT_STATUS_SUCCESS;
5757
5758 err = mgmt_cmd_complete(sk, hdev->id,
5759 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5760 &cp->addr, sizeof(cp->addr));
5761 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5762 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5763 u8 *rand192, *hash192, *rand256, *hash256;
5764 u8 status;
5765
5766 if (bdaddr_type_is_le(cp->addr.type)) {
5767 /* Enforce zero-valued 192-bit parameters as
5768 * long as legacy SMP OOB isn't implemented.
5769 */
5770 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5771 memcmp(cp->hash192, ZERO_KEY, 16)) {
5772 err = mgmt_cmd_complete(sk, hdev->id,
5773 MGMT_OP_ADD_REMOTE_OOB_DATA,
5774 MGMT_STATUS_INVALID_PARAMS,
5775 addr, sizeof(*addr));
5776 goto unlock;
5777 }
5778
5779 rand192 = NULL;
5780 hash192 = NULL;
5781 } else {
5782 /* If one of the P-192 values is set to zero, just
5783 * disable OOB data for P-192.
5784 */
5785 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5786 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5787 rand192 = NULL;
5788 hash192 = NULL;
5789 } else {
5790 rand192 = cp->rand192;
5791 hash192 = cp->hash192;
5792 }
5793 }
5794
5795 /* If one of the P-256 values is set to zero, just disable
5796 * OOB data for P-256.
5797 */
5798 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5799 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5800 rand256 = NULL;
5801 hash256 = NULL;
5802 } else {
5803 rand256 = cp->rand256;
5804 hash256 = cp->hash256;
5805 }
5806
5807 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5808 cp->addr.type, hash192, rand192,
5809 hash256, rand256);
5810 if (err < 0)
5811 status = MGMT_STATUS_FAILED;
5812 else
5813 status = MGMT_STATUS_SUCCESS;
5814
5815 err = mgmt_cmd_complete(sk, hdev->id,
5816 MGMT_OP_ADD_REMOTE_OOB_DATA,
5817 status, &cp->addr, sizeof(cp->addr));
5818 } else {
5819 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5820 len);
5821 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5822 MGMT_STATUS_INVALID_PARAMS);
5823 }
5824
5825 unlock:
5826 hci_dev_unlock(hdev);
5827 return err;
5828 }
5829
5830 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5831 void *data, u16 len)
5832 {
5833 struct mgmt_cp_remove_remote_oob_data *cp = data;
5834 u8 status;
5835 int err;
5836
5837 bt_dev_dbg(hdev, "sock %p", sk);
5838
5839 if (cp->addr.type != BDADDR_BREDR)
5840 return mgmt_cmd_complete(sk, hdev->id,
5841 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5842 MGMT_STATUS_INVALID_PARAMS,
5843 &cp->addr, sizeof(cp->addr));
5844
5845 hci_dev_lock(hdev);
5846
5847 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5848 hci_remote_oob_data_clear(hdev);
5849 status = MGMT_STATUS_SUCCESS;
5850 goto done;
5851 }
5852
5853 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5854 if (err < 0)
5855 status = MGMT_STATUS_INVALID_PARAMS;
5856 else
5857 status = MGMT_STATUS_SUCCESS;
5858
5859 done:
5860 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5861 status, &cp->addr, sizeof(cp->addr));
5862
5863 hci_dev_unlock(hdev);
5864 return err;
5865 }
5866
5867 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5868 {
5869 struct mgmt_pending_cmd *cmd;
5870
5871 bt_dev_dbg(hdev, "status %u", status);
5872
5873 hci_dev_lock(hdev);
5874
5875 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5876 if (!cmd)
5877 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5878
5879 if (!cmd)
5880 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5881
5882 if (cmd) {
5883 cmd->cmd_complete(cmd, mgmt_status(status));
5884 mgmt_pending_remove(cmd);
5885 }
5886
5887 hci_dev_unlock(hdev);
5888 }
5889
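/* Map a requested discovery type to the controller support it needs:
 * LE-only requires LE, BR/EDR-only requires BR/EDR, and interleaved
 * requires both (hence the fallthrough). On failure, *mgmt_status holds
 * the status code to return to user space.
 */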
5890 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5891 uint8_t *mgmt_status)
5892 {
5893 switch (type) {
5894 case DISCOV_TYPE_LE:
5895 *mgmt_status = mgmt_le_support(hdev);
5896 if (*mgmt_status)
5897 return false;
5898 break;
5899 case DISCOV_TYPE_INTERLEAVED:
5900 *mgmt_status = mgmt_le_support(hdev);
5901 if (*mgmt_status)
5902 return false;
5903 fallthrough;
5904 case DISCOV_TYPE_BREDR:
5905 *mgmt_status = mgmt_bredr_support(hdev);
5906 if (*mgmt_status)
5907 return false;
5908 break;
5909 default:
5910 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5911 return false;
5912 }
5913
5914 return true;
5915 }
5916
5917 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5918 {
5919 struct mgmt_pending_cmd *cmd = data;
5920
5921 bt_dev_dbg(hdev, "err %d", err);
5922
5923 if (err == -ECANCELED)
5924 return;
5925
5926 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5927 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5928 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5929 return;
5930
5931 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5932 cmd->param, 1);
5933 mgmt_pending_remove(cmd);
5934
5935 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
5936 DISCOVERY_FINDING);
5937 }
5938
5939 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5940 {
5941 return hci_start_discovery_sync(hdev);
5942 }
5943
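/* Common implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY: the adapter must be powered, no other
 * discovery may be active or paused, and the requested type must be
 * supported before the discovery state machine moves to
 * DISCOVERY_STARTING.
 */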
5944 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5945 u16 op, void *data, u16 len)
5946 {
5947 struct mgmt_cp_start_discovery *cp = data;
5948 struct mgmt_pending_cmd *cmd;
5949 u8 status;
5950 int err;
5951
5952 bt_dev_dbg(hdev, "sock %p", sk);
5953
5954 hci_dev_lock(hdev);
5955
5956 if (!hdev_is_powered(hdev)) {
5957 err = mgmt_cmd_complete(sk, hdev->id, op,
5958 MGMT_STATUS_NOT_POWERED,
5959 &cp->type, sizeof(cp->type));
5960 goto failed;
5961 }
5962
5963 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5964 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5965 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5966 &cp->type, sizeof(cp->type));
5967 goto failed;
5968 }
5969
5970 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5971 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5972 &cp->type, sizeof(cp->type));
5973 goto failed;
5974 }
5975
5976 /* Can't start discovery when it is paused */
5977 if (hdev->discovery_paused) {
5978 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5979 &cp->type, sizeof(cp->type));
5980 goto failed;
5981 }
5982
5983 /* Clear the discovery filter first to free any previously
5984 * allocated memory for the UUID list.
5985 */
5986 hci_discovery_filter_clear(hdev);
5987
5988 hdev->discovery.type = cp->type;
5989 hdev->discovery.report_invalid_rssi = false;
5990 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5991 hdev->discovery.limited = true;
5992 else
5993 hdev->discovery.limited = false;
5994
5995 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5996 if (!cmd) {
5997 err = -ENOMEM;
5998 goto failed;
5999 }
6000
6001 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6002 start_discovery_complete);
6003 if (err < 0) {
6004 mgmt_pending_remove(cmd);
6005 goto failed;
6006 }
6007
6008 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6009
6010 failed:
6011 hci_dev_unlock(hdev);
6012 return err;
6013 }
6014
6015 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
6016 void *data, u16 len)
6017 {
6018 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
6019 data, len);
6020 }
6021
6022 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
6023 void *data, u16 len)
6024 {
6025 return start_discovery_internal(sk, hdev,
6026 MGMT_OP_START_LIMITED_DISCOVERY,
6027 data, len);
6028 }
6029
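/* Each filter UUID occupies 16 bytes, so uuid_count is capped at
 * (U16_MAX - sizeof(*cp)) / 16 to guarantee that the recomputed
 * expected_len below cannot overflow the u16 length comparison.
 */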
6030 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
6031 void *data, u16 len)
6032 {
6033 struct mgmt_cp_start_service_discovery *cp = data;
6034 struct mgmt_pending_cmd *cmd;
6035 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
6036 u16 uuid_count, expected_len;
6037 u8 status;
6038 int err;
6039
6040 bt_dev_dbg(hdev, "sock %p", sk);
6041
6042 hci_dev_lock(hdev);
6043
6044 if (!hdev_is_powered(hdev)) {
6045 err = mgmt_cmd_complete(sk, hdev->id,
6046 MGMT_OP_START_SERVICE_DISCOVERY,
6047 MGMT_STATUS_NOT_POWERED,
6048 &cp->type, sizeof(cp->type));
6049 goto failed;
6050 }
6051
6052 if (hdev->discovery.state != DISCOVERY_STOPPED ||
6053 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
6054 err = mgmt_cmd_complete(sk, hdev->id,
6055 MGMT_OP_START_SERVICE_DISCOVERY,
6056 MGMT_STATUS_BUSY, &cp->type,
6057 sizeof(cp->type));
6058 goto failed;
6059 }
6060
6061 if (hdev->discovery_paused) {
6062 err = mgmt_cmd_complete(sk, hdev->id,
6063 MGMT_OP_START_SERVICE_DISCOVERY,
6064 MGMT_STATUS_BUSY, &cp->type,
6065 sizeof(cp->type));
6066 goto failed;
6067 }
6068
6069 uuid_count = __le16_to_cpu(cp->uuid_count);
6070 if (uuid_count > max_uuid_count) {
6071 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
6072 uuid_count);
6073 err = mgmt_cmd_complete(sk, hdev->id,
6074 MGMT_OP_START_SERVICE_DISCOVERY,
6075 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6076 sizeof(cp->type));
6077 goto failed;
6078 }
6079
6080 expected_len = sizeof(*cp) + uuid_count * 16;
6081 if (expected_len != len) {
6082 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
6083 expected_len, len);
6084 err = mgmt_cmd_complete(sk, hdev->id,
6085 MGMT_OP_START_SERVICE_DISCOVERY,
6086 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6087 sizeof(cp->type));
6088 goto failed;
6089 }
6090
6091 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6092 err = mgmt_cmd_complete(sk, hdev->id,
6093 MGMT_OP_START_SERVICE_DISCOVERY,
6094 status, &cp->type, sizeof(cp->type));
6095 goto failed;
6096 }
6097
6098 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6099 hdev, data, len);
6100 if (!cmd) {
6101 err = -ENOMEM;
6102 goto failed;
6103 }
6104
6105 /* Clear the discovery filter first to free any previously
6106 * allocated memory for the UUID list.
6107 */
6108 hci_discovery_filter_clear(hdev);
6109
6110 hdev->discovery.result_filtering = true;
6111 hdev->discovery.type = cp->type;
6112 hdev->discovery.rssi = cp->rssi;
6113 hdev->discovery.uuid_count = uuid_count;
6114
6115 if (uuid_count > 0) {
6116 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6117 GFP_KERNEL);
6118 if (!hdev->discovery.uuids) {
6119 err = mgmt_cmd_complete(sk, hdev->id,
6120 MGMT_OP_START_SERVICE_DISCOVERY,
6121 MGMT_STATUS_FAILED,
6122 &cp->type, sizeof(cp->type));
6123 mgmt_pending_remove(cmd);
6124 goto failed;
6125 }
6126 }
6127
6128 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6129 start_discovery_complete);
6130 if (err < 0) {
6131 mgmt_pending_remove(cmd);
6132 goto failed;
6133 }
6134
6135 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6136
6137 failed:
6138 hci_dev_unlock(hdev);
6139 return err;
6140 }
6141
6142 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6143 {
6144 struct mgmt_pending_cmd *cmd;
6145
6146 bt_dev_dbg(hdev, "status %u", status);
6147
6148 hci_dev_lock(hdev);
6149
6150 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6151 if (cmd) {
6152 cmd->cmd_complete(cmd, mgmt_status(status));
6153 mgmt_pending_remove(cmd);
6154 }
6155
6156 hci_dev_unlock(hdev);
6157 }
6158
6159 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6160 {
6161 struct mgmt_pending_cmd *cmd = data;
6162
6163 if (err == -ECANCELED ||
6164 cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6165 return;
6166
6167 bt_dev_dbg(hdev, "err %d", err);
6168
6169 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6170 cmd->param, 1);
6171 mgmt_pending_remove(cmd);
6172
6173 if (!err)
6174 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6175 }
6176
6177 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6178 {
6179 return hci_stop_discovery_sync(hdev);
6180 }
6181
6182 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6183 u16 len)
6184 {
6185 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6186 struct mgmt_pending_cmd *cmd;
6187 int err;
6188
6189 bt_dev_dbg(hdev, "sock %p", sk);
6190
6191 hci_dev_lock(hdev);
6192
6193 if (!hci_discovery_active(hdev)) {
6194 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6195 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6196 sizeof(mgmt_cp->type));
6197 goto unlock;
6198 }
6199
6200 if (hdev->discovery.type != mgmt_cp->type) {
6201 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6202 MGMT_STATUS_INVALID_PARAMS,
6203 &mgmt_cp->type, sizeof(mgmt_cp->type));
6204 goto unlock;
6205 }
6206
6207 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6208 if (!cmd) {
6209 err = -ENOMEM;
6210 goto unlock;
6211 }
6212
6213 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6214 stop_discovery_complete);
6215 if (err < 0) {
6216 mgmt_pending_remove(cmd);
6217 goto unlock;
6218 }
6219
6220 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6221
6222 unlock:
6223 hci_dev_unlock(hdev);
6224 return err;
6225 }
6226
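/* Resolve the name_known hint for an inquiry cache entry: a known name
 * removes the entry from the resolve list, while an unknown name marks
 * it NAME_NEEDED and re-queues it for name resolution.
 */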
6227 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6228 u16 len)
6229 {
6230 struct mgmt_cp_confirm_name *cp = data;
6231 struct inquiry_entry *e;
6232 int err;
6233
6234 bt_dev_dbg(hdev, "sock %p", sk);
6235
6236 hci_dev_lock(hdev);
6237
6238 if (!hci_discovery_active(hdev)) {
6239 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6240 MGMT_STATUS_FAILED, &cp->addr,
6241 sizeof(cp->addr));
6242 goto failed;
6243 }
6244
6245 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6246 if (!e) {
6247 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6248 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6249 sizeof(cp->addr));
6250 goto failed;
6251 }
6252
6253 if (cp->name_known) {
6254 e->name_state = NAME_KNOWN;
6255 list_del(&e->list);
6256 } else {
6257 e->name_state = NAME_NEEDED;
6258 hci_inquiry_cache_update_resolve(hdev, e);
6259 }
6260
6261 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6262 &cp->addr, sizeof(cp->addr));
6263
6264 failed:
6265 hci_dev_unlock(hdev);
6266 return err;
6267 }
6268
6269 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6270 u16 len)
6271 {
6272 struct mgmt_cp_block_device *cp = data;
6273 u8 status;
6274 int err;
6275
6276 bt_dev_dbg(hdev, "sock %p", sk);
6277
6278 if (!bdaddr_type_is_valid(cp->addr.type))
6279 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6280 MGMT_STATUS_INVALID_PARAMS,
6281 &cp->addr, sizeof(cp->addr));
6282
6283 hci_dev_lock(hdev);
6284
6285 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6286 cp->addr.type);
6287 if (err < 0) {
6288 status = MGMT_STATUS_FAILED;
6289 goto done;
6290 }
6291
6292 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6293 sk);
6294 status = MGMT_STATUS_SUCCESS;
6295
6296 done:
6297 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6298 &cp->addr, sizeof(cp->addr));
6299
6300 hci_dev_unlock(hdev);
6301
6302 return err;
6303 }
6304
6305 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6306 u16 len)
6307 {
6308 struct mgmt_cp_unblock_device *cp = data;
6309 u8 status;
6310 int err;
6311
6312 bt_dev_dbg(hdev, "sock %p", sk);
6313
6314 if (!bdaddr_type_is_valid(cp->addr.type))
6315 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6316 MGMT_STATUS_INVALID_PARAMS,
6317 &cp->addr, sizeof(cp->addr));
6318
6319 hci_dev_lock(hdev);
6320
6321 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6322 cp->addr.type);
6323 if (err < 0) {
6324 status = MGMT_STATUS_INVALID_PARAMS;
6325 goto done;
6326 }
6327
6328 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6329 sk);
6330 status = MGMT_STATUS_SUCCESS;
6331
6332 done:
6333 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6334 &cp->addr, sizeof(cp->addr));
6335
6336 hci_dev_unlock(hdev);
6337
6338 return err;
6339 }
6340
6341 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6342 {
6343 return hci_update_eir_sync(hdev);
6344 }
6345
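/* Per the Device ID profile, source 0x0000 disables the Device ID record
 * while 0x0001 and 0x0002 denote a vendor ID assigned by the Bluetooth
 * SIG and the USB Implementer's Forum respectively; anything above
 * 0x0002 is rejected. The EIR data is regenerated so the new record is
 * advertised.
 */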
6346 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6347 u16 len)
6348 {
6349 struct mgmt_cp_set_device_id *cp = data;
6350 int err;
6351 __u16 source;
6352
6353 bt_dev_dbg(hdev, "sock %p", sk);
6354
6355 source = __le16_to_cpu(cp->source);
6356
6357 if (source > 0x0002)
6358 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6359 MGMT_STATUS_INVALID_PARAMS);
6360
6361 hci_dev_lock(hdev);
6362
6363 hdev->devid_source = source;
6364 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6365 hdev->devid_product = __le16_to_cpu(cp->product);
6366 hdev->devid_version = __le16_to_cpu(cp->version);
6367
6368 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6369 NULL, 0);
6370
6371 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6372
6373 hci_dev_unlock(hdev);
6374
6375 return err;
6376 }
6377
6378 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6379 {
6380 if (err)
6381 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6382 else
6383 bt_dev_dbg(hdev, "status %d", err);
6384 }
6385
6386 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6387 {
6388 struct cmd_lookup match = { NULL, hdev };
6389 u8 instance;
6390 struct adv_info *adv_instance;
6391 u8 status = mgmt_status(err);
6392
6393 if (status) {
6394 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6395 cmd_status_rsp, &status);
6396 return;
6397 }
6398
6399 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6400 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6401 else
6402 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6403
6404 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6405 &match);
6406
6407 new_settings(hdev, match.sk);
6408
6409 if (match.sk)
6410 sock_put(match.sk);
6411
6412 /* If "Set Advertising" was just disabled and instance advertising was
6413 * set up earlier, then re-enable multi-instance advertising.
6414 */
6415 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6416 list_empty(&hdev->adv_instances))
6417 return;
6418
6419 instance = hdev->cur_adv_instance;
6420 if (!instance) {
6421 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6422 struct adv_info, list);
6423 if (!adv_instance)
6424 return;
6425
6426 instance = adv_instance->instance;
6427 }
6428
6429 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6430
6431 enable_advertising_instance(hdev, err);
6432 }
6433
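/* A value of 0x01 or 0x02 enables advertising, with 0x02 additionally
 * marking it connectable; instance 0x00 is the implicit instance used
 * by the legacy Set Advertising setting.
 */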
6434 static int set_adv_sync(struct hci_dev *hdev, void *data)
6435 {
6436 struct mgmt_pending_cmd *cmd = data;
6437 struct mgmt_mode *cp = cmd->param;
6438 u8 val = !!cp->val;
6439
6440 if (cp->val == 0x02)
6441 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6442 else
6443 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6444
6445 cancel_adv_timeout(hdev);
6446
6447 if (val) {
6448 /* Switch to instance "0" for the Set Advertising setting.
6449 * We cannot use update_[adv|scan_rsp]_data() here as the
6450 * HCI_ADVERTISING flag is not yet set.
6451 */
6452 hdev->cur_adv_instance = 0x00;
6453
6454 if (ext_adv_capable(hdev)) {
6455 hci_start_ext_adv_sync(hdev, 0x00);
6456 } else {
6457 hci_update_adv_data_sync(hdev, 0x00);
6458 hci_update_scan_rsp_data_sync(hdev, 0x00);
6459 hci_enable_advertising_sync(hdev);
6460 }
6461 } else {
6462 hci_disable_advertising_sync(hdev);
6463 }
6464
6465 return 0;
6466 }
6467
6468 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6469 u16 len)
6470 {
6471 struct mgmt_mode *cp = data;
6472 struct mgmt_pending_cmd *cmd;
6473 u8 val, status;
6474 int err;
6475
6476 bt_dev_dbg(hdev, "sock %p", sk);
6477
6478 status = mgmt_le_support(hdev);
6479 if (status)
6480 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6481 status);
6482
6483 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6484 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6485 MGMT_STATUS_INVALID_PARAMS);
6486
6487 if (hdev->advertising_paused)
6488 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6489 MGMT_STATUS_BUSY);
6490
6491 hci_dev_lock(hdev);
6492
6493 val = !!cp->val;
6494
6495 /* The following conditions mean that we should not do any HCI
6496 * communication but instead directly send a mgmt response to
6497 * user space (after toggling the flag if necessary).
6498 */
6500 if (!hdev_is_powered(hdev) ||
6501 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6502 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6503 hci_dev_test_flag(hdev, HCI_MESH) ||
6504 hci_conn_num(hdev, LE_LINK) > 0 ||
6505 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6506 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6507 bool changed;
6508
6509 if (cp->val) {
6510 hdev->cur_adv_instance = 0x00;
6511 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6512 if (cp->val == 0x02)
6513 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6514 else
6515 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6516 } else {
6517 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6518 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6519 }
6520
6521 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6522 if (err < 0)
6523 goto unlock;
6524
6525 if (changed)
6526 err = new_settings(hdev, sk);
6527
6528 goto unlock;
6529 }
6530
6531 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6532 pending_find(MGMT_OP_SET_LE, hdev)) {
6533 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6534 MGMT_STATUS_BUSY);
6535 goto unlock;
6536 }
6537
6538 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6539 if (!cmd)
6540 err = -ENOMEM;
6541 else
6542 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6543 set_advertising_complete);
6544
6545 if (err < 0 && cmd)
6546 mgmt_pending_remove(cmd);
6547
6548 unlock:
6549 hci_dev_unlock(hdev);
6550 return err;
6551 }
6552
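/* A static random address must have its two most significant bits set
 * (i.e. 0xC0 in the top byte) and may only be changed while the
 * controller is powered off; BDADDR_ANY clears the address again.
 */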
6553 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6554 void *data, u16 len)
6555 {
6556 struct mgmt_cp_set_static_address *cp = data;
6557 int err;
6558
6559 bt_dev_dbg(hdev, "sock %p", sk);
6560
6561 if (!lmp_le_capable(hdev))
6562 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6563 MGMT_STATUS_NOT_SUPPORTED);
6564
6565 if (hdev_is_powered(hdev))
6566 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6567 MGMT_STATUS_REJECTED);
6568
6569 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6570 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6571 return mgmt_cmd_status(sk, hdev->id,
6572 MGMT_OP_SET_STATIC_ADDRESS,
6573 MGMT_STATUS_INVALID_PARAMS);
6574
6575 /* Two most significant bits shall be set */
6576 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6577 return mgmt_cmd_status(sk, hdev->id,
6578 MGMT_OP_SET_STATIC_ADDRESS,
6579 MGMT_STATUS_INVALID_PARAMS);
6580 }
6581
6582 hci_dev_lock(hdev);
6583
6584 bacpy(&hdev->static_addr, &cp->bdaddr);
6585
6586 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6587 if (err < 0)
6588 goto unlock;
6589
6590 err = new_settings(hdev, sk);
6591
6592 unlock:
6593 hci_dev_unlock(hdev);
6594 return err;
6595 }
6596
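/* Interval and window are in units of 0.625 ms, limited to the range
 * 0x0004-0x4000 (2.5 ms to 10.24 s), and the window must not exceed
 * the interval.
 */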
6597 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6598 void *data, u16 len)
6599 {
6600 struct mgmt_cp_set_scan_params *cp = data;
6601 __u16 interval, window;
6602 int err;
6603
6604 bt_dev_dbg(hdev, "sock %p", sk);
6605
6606 if (!lmp_le_capable(hdev))
6607 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6608 MGMT_STATUS_NOT_SUPPORTED);
6609
6610 interval = __le16_to_cpu(cp->interval);
6611
6612 if (interval < 0x0004 || interval > 0x4000)
6613 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6614 MGMT_STATUS_INVALID_PARAMS);
6615
6616 window = __le16_to_cpu(cp->window);
6617
6618 if (window < 0x0004 || window > 0x4000)
6619 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6620 MGMT_STATUS_INVALID_PARAMS);
6621
6622 if (window > interval)
6623 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6624 MGMT_STATUS_INVALID_PARAMS);
6625
6626 hci_dev_lock(hdev);
6627
6628 hdev->le_scan_interval = interval;
6629 hdev->le_scan_window = window;
6630
6631 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6632 NULL, 0);
6633
6634 /* If background scan is running, restart it so new parameters are
6635 * loaded.
6636 */
6637 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6638 hdev->discovery.state == DISCOVERY_STOPPED)
6639 hci_update_passive_scan(hdev);
6640
6641 hci_dev_unlock(hdev);
6642
6643 return err;
6644 }
6645
6646 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6647 {
6648 struct mgmt_pending_cmd *cmd = data;
6649
6650 bt_dev_dbg(hdev, "err %d", err);
6651
6652 if (err) {
6653 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6654 mgmt_status(err));
6655 } else {
6656 struct mgmt_mode *cp = cmd->param;
6657
6658 if (cp->val)
6659 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6660 else
6661 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6662
6663 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6664 new_settings(hdev, cmd->sk);
6665 }
6666
6667 mgmt_pending_free(cmd);
6668 }
6669
6670 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6671 {
6672 struct mgmt_pending_cmd *cmd = data;
6673 struct mgmt_mode *cp = cmd->param;
6674
6675 return hci_write_fast_connectable_sync(hdev, cp->val);
6676 }
6677
6678 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6679 void *data, u16 len)
6680 {
6681 struct mgmt_mode *cp = data;
6682 struct mgmt_pending_cmd *cmd;
6683 int err;
6684
6685 bt_dev_dbg(hdev, "sock %p", sk);
6686
6687 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6688 hdev->hci_ver < BLUETOOTH_VER_1_2)
6689 return mgmt_cmd_status(sk, hdev->id,
6690 MGMT_OP_SET_FAST_CONNECTABLE,
6691 MGMT_STATUS_NOT_SUPPORTED);
6692
6693 if (cp->val != 0x00 && cp->val != 0x01)
6694 return mgmt_cmd_status(sk, hdev->id,
6695 MGMT_OP_SET_FAST_CONNECTABLE,
6696 MGMT_STATUS_INVALID_PARAMS);
6697
6698 hci_dev_lock(hdev);
6699
6700 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6701 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6702 goto unlock;
6703 }
6704
6705 if (!hdev_is_powered(hdev)) {
6706 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6707 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6708 new_settings(hdev, sk);
6709 goto unlock;
6710 }
6711
6712 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6713 len);
6714 if (!cmd)
6715 err = -ENOMEM;
6716 else
6717 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6718 fast_connectable_complete);
6719
6720 if (err < 0) {
6721 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6722 MGMT_STATUS_FAILED);
6723
6724 if (cmd)
6725 mgmt_pending_free(cmd);
6726 }
6727
6728 unlock:
6729 hci_dev_unlock(hdev);
6730
6731 return err;
6732 }
6733
6734 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6735 {
6736 struct mgmt_pending_cmd *cmd = data;
6737
6738 bt_dev_dbg(hdev, "err %d", err);
6739
6740 if (err) {
6741 u8 mgmt_err = mgmt_status(err);
6742
6743 /* We need to restore the flag if related HCI commands
6744 * failed.
6745 */
6746 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6747
6748 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6749 } else {
6750 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6751 new_settings(hdev, cmd->sk);
6752 }
6753
6754 mgmt_pending_free(cmd);
6755 }
6756
6757 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6758 {
6759 int status;
6760
6761 status = hci_write_fast_connectable_sync(hdev, false);
6762
6763 if (!status)
6764 status = hci_update_scan_sync(hdev);
6765
6766 /* Since only the advertising data flags will change, there
6767 * is no need to update the scan response data.
6768 */
6769 if (!status)
6770 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6771
6772 return status;
6773 }
6774
6775 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6776 {
6777 struct mgmt_mode *cp = data;
6778 struct mgmt_pending_cmd *cmd;
6779 int err;
6780
6781 bt_dev_dbg(hdev, "sock %p", sk);
6782
6783 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6784 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6785 MGMT_STATUS_NOT_SUPPORTED);
6786
6787 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6788 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6789 MGMT_STATUS_REJECTED);
6790
6791 if (cp->val != 0x00 && cp->val != 0x01)
6792 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6793 MGMT_STATUS_INVALID_PARAMS);
6794
6795 hci_dev_lock(hdev);
6796
6797 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6798 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6799 goto unlock;
6800 }
6801
6802 if (!hdev_is_powered(hdev)) {
6803 if (!cp->val) {
6804 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6805 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6806 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6807 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6808 }
6809
6810 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6811
6812 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6813 if (err < 0)
6814 goto unlock;
6815
6816 err = new_settings(hdev, sk);
6817 goto unlock;
6818 }
6819
6820 /* Reject disabling when powered on */
6821 if (!cp->val) {
6822 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6823 MGMT_STATUS_REJECTED);
6824 goto unlock;
6825 } else {
6826 /* When configuring a dual-mode controller to operate
6827 * with LE only and using a static address, switching
6828 * BR/EDR back on is not allowed.
6829 *
6830 * Dual-mode controllers shall operate with the public
6831 * address as their identity address for BR/EDR and LE. So
6832 * reject the attempt to create an invalid configuration.
6833 *
6834 * The same restriction applies when Secure Connections
6835 * has been enabled. For BR/EDR this is a controller feature
6836 * while for LE it is a host stack feature. This means that
6837 * switching BR/EDR back on when Secure Connections has been
6838 * enabled is not a supported transaction.
6839 */
6840 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6841 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6842 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6843 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6844 MGMT_STATUS_REJECTED);
6845 goto unlock;
6846 }
6847 }
6848
6849 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6850 if (!cmd)
6851 err = -ENOMEM;
6852 else
6853 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6854 set_bredr_complete);
6855
6856 if (err < 0) {
6857 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6858 MGMT_STATUS_FAILED);
6859 if (cmd)
6860 mgmt_pending_free(cmd);
6861
6862 goto unlock;
6863 }
6864
6865 /* We need to flip the bit already here so that
6866 * hci_req_update_adv_data generates the correct flags.
6867 */
6868 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6869
6870 unlock:
6871 hci_dev_unlock(hdev);
6872 return err;
6873 }
6874
6875 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6876 {
6877 struct mgmt_pending_cmd *cmd = data;
6878 struct mgmt_mode *cp;
6879
6880 bt_dev_dbg(hdev, "err %d", err);
6881
6882 if (err) {
6883 u8 mgmt_err = mgmt_status(err);
6884
6885 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6886 goto done;
6887 }
6888
6889 cp = cmd->param;
6890
6891 switch (cp->val) {
6892 case 0x00:
6893 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6894 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6895 break;
6896 case 0x01:
6897 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6898 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6899 break;
6900 case 0x02:
6901 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6902 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6903 break;
6904 }
6905
6906 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6907 new_settings(hdev, cmd->sk);
6908
6909 done:
6910 mgmt_pending_free(cmd);
6911 }
6912
6913 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6914 {
6915 struct mgmt_pending_cmd *cmd = data;
6916 struct mgmt_mode *cp = cmd->param;
6917 u8 val = !!cp->val;
6918
6919 /* Force write of val */
6920 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6921
6922 return hci_write_sc_support_sync(hdev, val);
6923 }
6924
6925 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6926 void *data, u16 len)
6927 {
6928 struct mgmt_mode *cp = data;
6929 struct mgmt_pending_cmd *cmd;
6930 u8 val;
6931 int err;
6932
6933 bt_dev_dbg(hdev, "sock %p", sk);
6934
6935 if (!lmp_sc_capable(hdev) &&
6936 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6937 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6938 MGMT_STATUS_NOT_SUPPORTED);
6939
6940 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6941 lmp_sc_capable(hdev) &&
6942 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6943 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6944 MGMT_STATUS_REJECTED);
6945
6946 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6947 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6948 MGMT_STATUS_INVALID_PARAMS);
6949
6950 hci_dev_lock(hdev);
6951
6952 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6953 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6954 bool changed;
6955
6956 if (cp->val) {
6957 changed = !hci_dev_test_and_set_flag(hdev,
6958 HCI_SC_ENABLED);
6959 if (cp->val == 0x02)
6960 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6961 else
6962 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6963 } else {
6964 changed = hci_dev_test_and_clear_flag(hdev,
6965 HCI_SC_ENABLED);
6966 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6967 }
6968
6969 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6970 if (err < 0)
6971 goto failed;
6972
6973 if (changed)
6974 err = new_settings(hdev, sk);
6975
6976 goto failed;
6977 }
6978
6979 val = !!cp->val;
6980
6981 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6982 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6983 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6984 goto failed;
6985 }
6986
6987 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6988 if (!cmd)
6989 err = -ENOMEM;
6990 else
6991 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6992 set_secure_conn_complete);
6993
6994 if (err < 0) {
6995 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6996 MGMT_STATUS_FAILED);
6997 if (cmd)
6998 mgmt_pending_free(cmd);
6999 }
7000
7001 failed:
7002 hci_dev_unlock(hdev);
7003 return err;
7004 }
7005
7006 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
7007 void *data, u16 len)
7008 {
7009 struct mgmt_mode *cp = data;
7010 bool changed, use_changed;
7011 int err;
7012
7013 bt_dev_dbg(hdev, "sock %p", sk);
7014
7015 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
7016 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
7017 MGMT_STATUS_INVALID_PARAMS);
7018
7019 hci_dev_lock(hdev);
7020
7021 if (cp->val)
7022 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
7023 else
7024 changed = hci_dev_test_and_clear_flag(hdev,
7025 HCI_KEEP_DEBUG_KEYS);
7026
7027 if (cp->val == 0x02)
7028 use_changed = !hci_dev_test_and_set_flag(hdev,
7029 HCI_USE_DEBUG_KEYS);
7030 else
7031 use_changed = hci_dev_test_and_clear_flag(hdev,
7032 HCI_USE_DEBUG_KEYS);
7033
7034 if (hdev_is_powered(hdev) && use_changed &&
7035 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7036 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
7037 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
7038 sizeof(mode), &mode);
7039 }
7040
7041 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
7042 if (err < 0)
7043 goto unlock;
7044
7045 if (changed)
7046 err = new_settings(hdev, sk);
7047
7048 unlock:
7049 hci_dev_unlock(hdev);
7050 return err;
7051 }
7052
7053 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7054 u16 len)
7055 {
7056 struct mgmt_cp_set_privacy *cp = cp_data;
7057 bool changed;
7058 int err;
7059
7060 bt_dev_dbg(hdev, "sock %p", sk);
7061
7062 if (!lmp_le_capable(hdev))
7063 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7064 MGMT_STATUS_NOT_SUPPORTED);
7065
7066 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
7067 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7068 MGMT_STATUS_INVALID_PARAMS);
7069
7070 if (hdev_is_powered(hdev))
7071 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7072 MGMT_STATUS_REJECTED);
7073
7074 hci_dev_lock(hdev);
7075
7076 /* If user space supports this command it is also expected to
7077 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
7078 */
7079 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7080
7081 if (cp->privacy) {
7082 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
7083 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
7084 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
7085 hci_adv_instances_set_rpa_expired(hdev, true);
7086 if (cp->privacy == 0x02)
7087 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
7088 else
7089 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7090 } else {
7091 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7092 memset(hdev->irk, 0, sizeof(hdev->irk));
7093 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7094 hci_adv_instances_set_rpa_expired(hdev, false);
7095 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7096 }
7097
7098 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7099 if (err < 0)
7100 goto unlock;
7101
7102 if (changed)
7103 err = new_settings(hdev, sk);
7104
7105 unlock:
7106 hci_dev_unlock(hdev);
7107 return err;
7108 }
7109
7110 static bool irk_is_valid(struct mgmt_irk_info *irk)
7111 {
7112 switch (irk->addr.type) {
7113 case BDADDR_LE_PUBLIC:
7114 return true;
7115
7116 case BDADDR_LE_RANDOM:
7117 /* Two most significant bits shall be set */
7118 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7119 return false;
7120 return true;
7121 }
7122
7123 return false;
7124 }
7125
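/* Replace the complete set of stored Identity Resolving Keys with the
 * list provided by user space, skipping any key that matches a blocked
 * key, and turn on RPA resolving.
 */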
7126 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7127 u16 len)
7128 {
7129 struct mgmt_cp_load_irks *cp = cp_data;
7130 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7131 sizeof(struct mgmt_irk_info));
7132 u16 irk_count, expected_len;
7133 int i, err;
7134
7135 bt_dev_dbg(hdev, "sock %p", sk);
7136
7137 if (!lmp_le_capable(hdev))
7138 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7139 MGMT_STATUS_NOT_SUPPORTED);
7140
7141 irk_count = __le16_to_cpu(cp->irk_count);
7142 if (irk_count > max_irk_count) {
7143 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7144 irk_count);
7145 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7146 MGMT_STATUS_INVALID_PARAMS);
7147 }
7148
7149 expected_len = struct_size(cp, irks, irk_count);
7150 if (expected_len != len) {
7151 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7152 expected_len, len);
7153 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7154 MGMT_STATUS_INVALID_PARAMS);
7155 }
7156
7157 bt_dev_dbg(hdev, "irk_count %u", irk_count);
7158
7159 for (i = 0; i < irk_count; i++) {
7160 struct mgmt_irk_info *key = &cp->irks[i];
7161
7162 if (!irk_is_valid(key))
7163 return mgmt_cmd_status(sk, hdev->id,
7164 MGMT_OP_LOAD_IRKS,
7165 MGMT_STATUS_INVALID_PARAMS);
7166 }
7167
7168 hci_dev_lock(hdev);
7169
7170 hci_smp_irks_clear(hdev);
7171
7172 for (i = 0; i < irk_count; i++) {
7173 struct mgmt_irk_info *irk = &cp->irks[i];
7174
7175 if (hci_is_blocked_key(hdev,
7176 HCI_BLOCKED_KEY_TYPE_IRK,
7177 irk->val)) {
7178 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7179 &irk->addr.bdaddr);
7180 continue;
7181 }
7182
7183 hci_add_irk(hdev, &irk->addr.bdaddr,
7184 le_addr_type(irk->addr.type), irk->val,
7185 BDADDR_ANY);
7186 }
7187
7188 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7189
7190 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7191
7192 hci_dev_unlock(hdev);
7193
7194 return err;
7195 }
7196
7197 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7198 {
7199 if (key->initiator != 0x00 && key->initiator != 0x01)
7200 return false;
7201
7202 switch (key->addr.type) {
7203 case BDADDR_LE_PUBLIC:
7204 return true;
7205
7206 case BDADDR_LE_RANDOM:
7207 /* Two most significant bits shall be set */
7208 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7209 return false;
7210 return true;
7211 }
7212
7213 return false;
7214 }
7215
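/* Replace the stored LE long term keys. Note that MGMT_LTK_P256_DEBUG
 * entries fall through to the default case below and are therefore
 * never added to the key store.
 */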
7216 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7217 void *cp_data, u16 len)
7218 {
7219 struct mgmt_cp_load_long_term_keys *cp = cp_data;
7220 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7221 sizeof(struct mgmt_ltk_info));
7222 u16 key_count, expected_len;
7223 int i, err;
7224
7225 bt_dev_dbg(hdev, "sock %p", sk);
7226
7227 if (!lmp_le_capable(hdev))
7228 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7229 MGMT_STATUS_NOT_SUPPORTED);
7230
7231 key_count = __le16_to_cpu(cp->key_count);
7232 if (key_count > max_key_count) {
7233 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7234 key_count);
7235 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7236 MGMT_STATUS_INVALID_PARAMS);
7237 }
7238
7239 expected_len = struct_size(cp, keys, key_count);
7240 if (expected_len != len) {
7241 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7242 expected_len, len);
7243 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7244 MGMT_STATUS_INVALID_PARAMS);
7245 }
7246
7247 bt_dev_dbg(hdev, "key_count %u", key_count);
7248
7249 hci_dev_lock(hdev);
7250
7251 hci_smp_ltks_clear(hdev);
7252
7253 for (i = 0; i < key_count; i++) {
7254 struct mgmt_ltk_info *key = &cp->keys[i];
7255 u8 type, authenticated;
7256
7257 if (hci_is_blocked_key(hdev,
7258 HCI_BLOCKED_KEY_TYPE_LTK,
7259 key->val)) {
7260 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7261 &key->addr.bdaddr);
7262 continue;
7263 }
7264
7265 if (!ltk_is_valid(key)) {
7266 bt_dev_warn(hdev, "Invalid LTK for %pMR",
7267 &key->addr.bdaddr);
7268 continue;
7269 }
7270
7271 switch (key->type) {
7272 case MGMT_LTK_UNAUTHENTICATED:
7273 authenticated = 0x00;
7274 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7275 break;
7276 case MGMT_LTK_AUTHENTICATED:
7277 authenticated = 0x01;
7278 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7279 break;
7280 case MGMT_LTK_P256_UNAUTH:
7281 authenticated = 0x00;
7282 type = SMP_LTK_P256;
7283 break;
7284 case MGMT_LTK_P256_AUTH:
7285 authenticated = 0x01;
7286 type = SMP_LTK_P256;
7287 break;
7288 case MGMT_LTK_P256_DEBUG:
7289 authenticated = 0x00;
7290 type = SMP_LTK_P256_DEBUG;
7291 fallthrough;
7292 default:
7293 continue;
7294 }
7295
7296 hci_add_ltk(hdev, &key->addr.bdaddr,
7297 le_addr_type(key->addr.type), type, authenticated,
7298 key->val, key->enc_size, key->ediv, key->rand);
7299 }
7300
7301 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7302 NULL, 0);
7303
7304 hci_dev_unlock(hdev);
7305
7306 return err;
7307 }
7308
7309 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7310 {
7311 struct mgmt_pending_cmd *cmd = data;
7312 struct hci_conn *conn = cmd->user_data;
7313 struct mgmt_cp_get_conn_info *cp = cmd->param;
7314 struct mgmt_rp_get_conn_info rp;
7315 u8 status;
7316
7317 bt_dev_dbg(hdev, "err %d", err);
7318
7319 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7320
7321 status = mgmt_status(err);
7322 if (status == MGMT_STATUS_SUCCESS) {
7323 rp.rssi = conn->rssi;
7324 rp.tx_power = conn->tx_power;
7325 rp.max_tx_power = conn->max_tx_power;
7326 } else {
7327 rp.rssi = HCI_RSSI_INVALID;
7328 rp.tx_power = HCI_TX_POWER_INVALID;
7329 rp.max_tx_power = HCI_TX_POWER_INVALID;
7330 }
7331
7332 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7333 &rp, sizeof(rp));
7334
7335 mgmt_pending_free(cmd);
7336 }
7337
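/* Runs from the cmd_sync context: re-validate that the connection still
 * exists, then refresh the RSSI and, where still unknown, the current
 * and maximum TX power readings.
 */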
7338 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7339 {
7340 struct mgmt_pending_cmd *cmd = data;
7341 struct mgmt_cp_get_conn_info *cp = cmd->param;
7342 struct hci_conn *conn;
7343 int err;
7344 __le16 handle;
7345
7346 /* Make sure we are still connected */
7347 if (cp->addr.type == BDADDR_BREDR)
7348 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7349 &cp->addr.bdaddr);
7350 else
7351 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7352
7353 if (!conn || conn->state != BT_CONNECTED)
7354 return MGMT_STATUS_NOT_CONNECTED;
7355
7356 cmd->user_data = conn;
7357 handle = cpu_to_le16(conn->handle);
7358
7359 /* Refresh RSSI each time */
7360 err = hci_read_rssi_sync(hdev, handle);
7361
7362 /* For LE links the TX power does not change, thus we don't need
7363 * to query for it once the value is known.
7364 */
7365 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7366 conn->tx_power == HCI_TX_POWER_INVALID))
7367 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7368
7369 /* Max TX power needs to be read only once per connection */
7370 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7371 err = hci_read_tx_power_sync(hdev, handle, 0x01);
7372
7373 return err;
7374 }
7375
7376 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7377 u16 len)
7378 {
7379 struct mgmt_cp_get_conn_info *cp = data;
7380 struct mgmt_rp_get_conn_info rp;
7381 struct hci_conn *conn;
7382 unsigned long conn_info_age;
7383 int err = 0;
7384
7385 bt_dev_dbg(hdev, "sock %p", sk);
7386
7387 memset(&rp, 0, sizeof(rp));
7388 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7389 rp.addr.type = cp->addr.type;
7390
7391 if (!bdaddr_type_is_valid(cp->addr.type))
7392 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7393 MGMT_STATUS_INVALID_PARAMS,
7394 &rp, sizeof(rp));
7395
7396 hci_dev_lock(hdev);
7397
7398 if (!hdev_is_powered(hdev)) {
7399 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7400 MGMT_STATUS_NOT_POWERED, &rp,
7401 sizeof(rp));
7402 goto unlock;
7403 }
7404
7405 if (cp->addr.type == BDADDR_BREDR)
7406 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7407 &cp->addr.bdaddr);
7408 else
7409 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7410
7411 if (!conn || conn->state != BT_CONNECTED) {
7412 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7413 MGMT_STATUS_NOT_CONNECTED, &rp,
7414 sizeof(rp));
7415 goto unlock;
7416 }
7417
7418 /* To avoid the client trying to guess when to poll again, conn info
7419 * age is calculated as a random value between the min/max set in hdev.
7420 */
7421 conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
7422 hdev->conn_info_max_age - 1);
7423
7424 /* Query controller to refresh cached values if they are too old or were
7425 * never read.
7426 */
7427 if (time_after(jiffies, conn->conn_info_timestamp +
7428 msecs_to_jiffies(conn_info_age)) ||
7429 !conn->conn_info_timestamp) {
7430 struct mgmt_pending_cmd *cmd;
7431
7432 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7433 len);
7434 if (!cmd) {
7435 err = -ENOMEM;
7436 } else {
7437 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7438 cmd, get_conn_info_complete);
7439 }
7440
7441 if (err < 0) {
7442 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7443 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7444
7445 if (cmd)
7446 mgmt_pending_free(cmd);
7447
7448 goto unlock;
7449 }
7450
7451 conn->conn_info_timestamp = jiffies;
7452 } else {
7453 /* Cache is valid, just reply with values cached in hci_conn */
7454 rp.rssi = conn->rssi;
7455 rp.tx_power = conn->tx_power;
7456 rp.max_tx_power = conn->max_tx_power;
7457
7458 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7459 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7460 }
7461
7462 unlock:
7463 hci_dev_unlock(hdev);
7464 return err;
7465 }
7466
7467 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7468 {
7469 struct mgmt_pending_cmd *cmd = data;
7470 struct mgmt_cp_get_clock_info *cp = cmd->param;
7471 struct mgmt_rp_get_clock_info rp;
7472 struct hci_conn *conn = cmd->user_data;
7473 u8 status = mgmt_status(err);
7474
7475 bt_dev_dbg(hdev, "err %d", err);
7476
7477 memset(&rp, 0, sizeof(rp));
7478 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7479 rp.addr.type = cp->addr.type;
7480
7481 if (err)
7482 goto complete;
7483
7484 rp.local_clock = cpu_to_le32(hdev->clock);
7485
7486 if (conn) {
7487 rp.piconet_clock = cpu_to_le32(conn->clock);
7488 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7489 }
7490
7491 complete:
7492 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7493 sizeof(rp));
7494
7495 mgmt_pending_free(cmd);
7496 }
7497
7498 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7499 {
7500 struct mgmt_pending_cmd *cmd = data;
7501 struct mgmt_cp_get_clock_info *cp = cmd->param;
7502 struct hci_cp_read_clock hci_cp;
7503 struct hci_conn *conn;
7504
7505 memset(&hci_cp, 0, sizeof(hci_cp));
7506 hci_read_clock_sync(hdev, &hci_cp);
7507
7508 /* Make sure connection still exists */
7509 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7510 if (!conn || conn->state != BT_CONNECTED)
7511 return MGMT_STATUS_NOT_CONNECTED;
7512
7513 cmd->user_data = conn;
7514 hci_cp.handle = cpu_to_le16(conn->handle);
7515 hci_cp.which = 0x01; /* Piconet clock */
7516
7517 return hci_read_clock_sync(hdev, &hci_cp);
7518 }
7519
7520 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7521 u16 len)
7522 {
7523 struct mgmt_cp_get_clock_info *cp = data;
7524 struct mgmt_rp_get_clock_info rp;
7525 struct mgmt_pending_cmd *cmd;
7526 struct hci_conn *conn;
7527 int err;
7528
7529 bt_dev_dbg(hdev, "sock %p", sk);
7530
7531 memset(&rp, 0, sizeof(rp));
7532 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7533 rp.addr.type = cp->addr.type;
7534
7535 if (cp->addr.type != BDADDR_BREDR)
7536 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7537 MGMT_STATUS_INVALID_PARAMS,
7538 &rp, sizeof(rp));
7539
7540 hci_dev_lock(hdev);
7541
7542 if (!hdev_is_powered(hdev)) {
7543 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7544 MGMT_STATUS_NOT_POWERED, &rp,
7545 sizeof(rp));
7546 goto unlock;
7547 }
7548
7549 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7550 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7551 &cp->addr.bdaddr);
7552 if (!conn || conn->state != BT_CONNECTED) {
7553 err = mgmt_cmd_complete(sk, hdev->id,
7554 MGMT_OP_GET_CLOCK_INFO,
7555 MGMT_STATUS_NOT_CONNECTED,
7556 &rp, sizeof(rp));
7557 goto unlock;
7558 }
7559 } else {
7560 conn = NULL;
7561 }
7562
7563 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7564 if (!cmd)
7565 err = -ENOMEM;
7566 else
7567 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7568 get_clock_info_complete);
7569
7570 if (err < 0) {
7571 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7572 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7573
7574 if (cmd)
7575 mgmt_pending_free(cmd);
7576 }
7577
7579 unlock:
7580 hci_dev_unlock(hdev);
7581 return err;
7582 }
7583
7584 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7585 {
7586 struct hci_conn *conn;
7587
7588 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7589 if (!conn)
7590 return false;
7591
7592 if (conn->dst_type != type)
7593 return false;
7594
7595 if (conn->state != BT_CONNECTED)
7596 return false;
7597
7598 return true;
7599 }
7600
7601 /* This function requires the caller holds hdev->lock */
7602 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7603 u8 addr_type, u8 auto_connect)
7604 {
7605 struct hci_conn_params *params;
7606
7607 params = hci_conn_params_add(hdev, addr, addr_type);
7608 if (!params)
7609 return -EIO;
7610
7611 if (params->auto_connect == auto_connect)
7612 return 0;
7613
7614 hci_pend_le_list_del_init(params);
7615
7616 switch (auto_connect) {
7617 case HCI_AUTO_CONN_DISABLED:
7618 case HCI_AUTO_CONN_LINK_LOSS:
7619 		/* If auto connect is being disabled while we're trying to
7620 		 * connect to the device, keep connecting.
7621 		 */
7622 if (params->explicit_connect)
7623 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7624 break;
7625 case HCI_AUTO_CONN_REPORT:
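		/* Keep an ongoing explicit connect attempt on the connect
		 * list; otherwise only report the device when it is seen.
		 */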
7626 if (params->explicit_connect)
7627 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7628 else
7629 hci_pend_le_list_add(params, &hdev->pend_le_reports);
7630 break;
7631 case HCI_AUTO_CONN_DIRECT:
7632 case HCI_AUTO_CONN_ALWAYS:
7633 if (!is_connected(hdev, addr, addr_type))
7634 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7635 break;
7636 }
7637
7638 params->auto_connect = auto_connect;
7639
7640 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7641 addr, addr_type, auto_connect);
7642
7643 return 0;
7644 }
7645
7646 static void device_added(struct sock *sk, struct hci_dev *hdev,
7647 bdaddr_t *bdaddr, u8 type, u8 action)
7648 {
7649 struct mgmt_ev_device_added ev;
7650
7651 bacpy(&ev.addr.bdaddr, bdaddr);
7652 ev.addr.type = type;
7653 ev.action = action;
7654
7655 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7656 }
7657
7658 static void add_device_complete(struct hci_dev *hdev, void *data, int err)
7659 {
7660 struct mgmt_pending_cmd *cmd = data;
7661 struct mgmt_cp_add_device *cp = cmd->param;
7662
7663 if (!err) {
7664 device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
7665 cp->action);
7666 device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
7667 cp->addr.type, hdev->conn_flags,
7668 PTR_UINT(cmd->user_data));
7669 }
7670
7671 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
7672 mgmt_status(err), &cp->addr, sizeof(cp->addr));
7673 mgmt_pending_free(cmd);
7674 }
7675
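/* For LE devices it is enough to refresh the passive scan state; the
 * accept list is rebuilt from the pend_le_conns/pend_le_reports lists.
 */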
7676 static int add_device_sync(struct hci_dev *hdev, void *data)
7677 {
7678 return hci_update_passive_scan_sync(hdev);
7679 }
7680
7681 static int add_device(struct sock *sk, struct hci_dev *hdev,
7682 void *data, u16 len)
7683 {
7684 struct mgmt_pending_cmd *cmd;
7685 struct mgmt_cp_add_device *cp = data;
7686 u8 auto_conn, addr_type;
7687 struct hci_conn_params *params;
7688 int err;
7689 u32 current_flags = 0;
7690 u32 supported_flags;
7691
7692 bt_dev_dbg(hdev, "sock %p", sk);
7693
7694 if (!bdaddr_type_is_valid(cp->addr.type) ||
7695 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7696 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7697 MGMT_STATUS_INVALID_PARAMS,
7698 &cp->addr, sizeof(cp->addr));
7699
7700 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7701 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7702 MGMT_STATUS_INVALID_PARAMS,
7703 &cp->addr, sizeof(cp->addr));
7704
7705 hci_dev_lock(hdev);
7706
7707 if (cp->addr.type == BDADDR_BREDR) {
7708 		/* Only the incoming connections action (0x01) is supported for now */
7709 if (cp->action != 0x01) {
7710 err = mgmt_cmd_complete(sk, hdev->id,
7711 MGMT_OP_ADD_DEVICE,
7712 MGMT_STATUS_INVALID_PARAMS,
7713 &cp->addr, sizeof(cp->addr));
7714 goto unlock;
7715 }
7716
7717 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7718 &cp->addr.bdaddr,
7719 cp->addr.type, 0);
7720 if (err)
7721 goto unlock;
7722
7723 hci_update_scan(hdev);
7724
7725 goto added;
7726 }
7727
7728 addr_type = le_addr_type(cp->addr.type);
7729
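	/* Map the Add Device action onto the auto-connect policy: 0x02
	 * auto-connects, 0x01 allows the device to connect directly and
	 * 0x00 only reports the device when it is seen.
	 */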
7730 if (cp->action == 0x02)
7731 auto_conn = HCI_AUTO_CONN_ALWAYS;
7732 else if (cp->action == 0x01)
7733 auto_conn = HCI_AUTO_CONN_DIRECT;
7734 else
7735 auto_conn = HCI_AUTO_CONN_REPORT;
7736
7737 	/* The kernel internally uses conn_params with a resolvable private
7738 	 * address, but Add Device allows only identity addresses.
7739 	 * Make sure this is enforced before calling
7740 	 * hci_conn_params_lookup.
7741 	 */
7742 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7743 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7744 MGMT_STATUS_INVALID_PARAMS,
7745 &cp->addr, sizeof(cp->addr));
7746 goto unlock;
7747 }
7748
7749 /* If the connection parameters don't exist for this device,
7750 * they will be created and configured with defaults.
7751 */
7752 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7753 auto_conn) < 0) {
7754 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7755 MGMT_STATUS_FAILED, &cp->addr,
7756 sizeof(cp->addr));
7757 goto unlock;
7758 } else {
7759 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7760 addr_type);
7761 if (params)
7762 current_flags = params->flags;
7763 }
7764
7765 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
7766 if (!cmd) {
7767 err = -ENOMEM;
7768 goto unlock;
7769 }
7770
7771 cmd->user_data = UINT_PTR(current_flags);
7772
7773 err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
7774 add_device_complete);
7775 if (err < 0) {
7776 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7777 MGMT_STATUS_FAILED, &cp->addr,
7778 sizeof(cp->addr));
7779 mgmt_pending_free(cmd);
7780 }
7781
7782 goto unlock;
7783
7784 added:
7785 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7786 supported_flags = hdev->conn_flags;
7787 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7788 supported_flags, current_flags);
7789
7790 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7791 MGMT_STATUS_SUCCESS, &cp->addr,
7792 sizeof(cp->addr));
7793
7794 unlock:
7795 hci_dev_unlock(hdev);
7796 return err;
7797 }
7798
7799 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7800 bdaddr_t *bdaddr, u8 type)
7801 {
7802 struct mgmt_ev_device_removed ev;
7803
7804 bacpy(&ev.addr.bdaddr, bdaddr);
7805 ev.addr.type = type;
7806
7807 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7808 }
7809
7810 static int remove_device_sync(struct hci_dev *hdev, void *data)
7811 {
7812 return hci_update_passive_scan_sync(hdev);
7813 }
7814
7815 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7816 void *data, u16 len)
7817 {
7818 struct mgmt_cp_remove_device *cp = data;
7819 int err;
7820
7821 bt_dev_dbg(hdev, "sock %p", sk);
7822
7823 hci_dev_lock(hdev);
7824
7825 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7826 struct hci_conn_params *params;
7827 u8 addr_type;
7828
7829 if (!bdaddr_type_is_valid(cp->addr.type)) {
7830 err = mgmt_cmd_complete(sk, hdev->id,
7831 MGMT_OP_REMOVE_DEVICE,
7832 MGMT_STATUS_INVALID_PARAMS,
7833 &cp->addr, sizeof(cp->addr));
7834 goto unlock;
7835 }
7836
7837 if (cp->addr.type == BDADDR_BREDR) {
7838 err = hci_bdaddr_list_del(&hdev->accept_list,
7839 &cp->addr.bdaddr,
7840 cp->addr.type);
7841 if (err) {
7842 err = mgmt_cmd_complete(sk, hdev->id,
7843 MGMT_OP_REMOVE_DEVICE,
7844 MGMT_STATUS_INVALID_PARAMS,
7845 &cp->addr,
7846 sizeof(cp->addr));
7847 goto unlock;
7848 }
7849
7850 hci_update_scan(hdev);
7851
7852 device_removed(sk, hdev, &cp->addr.bdaddr,
7853 cp->addr.type);
7854 goto complete;
7855 }
7856
7857 addr_type = le_addr_type(cp->addr.type);
7858
7859 		/* The kernel internally uses conn_params with a resolvable private
7860 		 * address, but Remove Device allows only identity addresses.
7861 		 * Make sure this is enforced before calling
7862 		 * hci_conn_params_lookup.
7863 		 */
7864 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7865 err = mgmt_cmd_complete(sk, hdev->id,
7866 MGMT_OP_REMOVE_DEVICE,
7867 MGMT_STATUS_INVALID_PARAMS,
7868 &cp->addr, sizeof(cp->addr));
7869 goto unlock;
7870 }
7871
7872 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7873 addr_type);
7874 if (!params) {
7875 err = mgmt_cmd_complete(sk, hdev->id,
7876 MGMT_OP_REMOVE_DEVICE,
7877 MGMT_STATUS_INVALID_PARAMS,
7878 &cp->addr, sizeof(cp->addr));
7879 goto unlock;
7880 }
7881
7882 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7883 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7884 err = mgmt_cmd_complete(sk, hdev->id,
7885 MGMT_OP_REMOVE_DEVICE,
7886 MGMT_STATUS_INVALID_PARAMS,
7887 &cp->addr, sizeof(cp->addr));
7888 goto unlock;
7889 }
7890
7891 hci_conn_params_free(params);
7892
7893 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7894 } else {
7895 struct hci_conn_params *p, *tmp;
7896 struct bdaddr_list *b, *btmp;
7897
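		/* Removing all devices requires BDADDR_ANY paired with
		 * address type 0x00.
		 */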
7898 if (cp->addr.type) {
7899 err = mgmt_cmd_complete(sk, hdev->id,
7900 MGMT_OP_REMOVE_DEVICE,
7901 MGMT_STATUS_INVALID_PARAMS,
7902 &cp->addr, sizeof(cp->addr));
7903 goto unlock;
7904 }
7905
7906 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7907 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7908 list_del(&b->list);
7909 kfree(b);
7910 }
7911
7912 hci_update_scan(hdev);
7913
7914 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7915 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7916 continue;
7917 device_removed(sk, hdev, &p->addr, p->addr_type);
7918 if (p->explicit_connect) {
7919 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7920 continue;
7921 }
7922 hci_conn_params_free(p);
7923 }
7924
7925 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7926 }
7927
7928 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7929
7930 complete:
7931 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7932 MGMT_STATUS_SUCCESS, &cp->addr,
7933 sizeof(cp->addr));
7934 unlock:
7935 hci_dev_unlock(hdev);
7936 return err;
7937 }
7938
7939 static int conn_update_sync(struct hci_dev *hdev, void *data)
7940 {
7941 struct hci_conn_params *params = data;
7942 struct hci_conn *conn;
7943
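	/* The connection may have gone away before the sync work ran. */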
7944 	conn = hci_conn_hash_lookup_le(hdev, &params->addr, params->addr_type);
7945 if (!conn)
7946 return -ECANCELED;
7947
7948 return hci_le_conn_update_sync(hdev, conn, params);
7949 }
7950
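/* A rough sketch of the wire format consumed below; the example values
 * are illustrative only (intervals in 1.25 ms units, the timeout in
 * 10 ms units, all little endian):
 *
 *   struct mgmt_cp_load_conn_param {
 *           __le16 param_count;              // e.g. 1
 *           struct mgmt_conn_param {
 *                   struct mgmt_addr_info addr;
 *                   __le16 min_interval;     // e.g. 0x0018 (30 ms)
 *                   __le16 max_interval;     // e.g. 0x0028 (50 ms)
 *                   __le16 latency;          // e.g. 0x0000 (no latency)
 *                   __le16 timeout;          // e.g. 0x002a (420 ms)
 *           } params[];
 *   };
 */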
7951 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7952 u16 len)
7953 {
7954 struct mgmt_cp_load_conn_param *cp = data;
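	/* Largest entry count that keeps the total message within a
	 * 16-bit length.
	 */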
7955 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7956 sizeof(struct mgmt_conn_param));
7957 u16 param_count, expected_len;
7958 int i;
7959
7960 if (!lmp_le_capable(hdev))
7961 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7962 MGMT_STATUS_NOT_SUPPORTED);
7963
7964 param_count = __le16_to_cpu(cp->param_count);
7965 if (param_count > max_param_count) {
7966 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7967 param_count);
7968 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7969 MGMT_STATUS_INVALID_PARAMS);
7970 }
7971
7972 expected_len = struct_size(cp, params, param_count);
7973 if (expected_len != len) {
7974 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7975 expected_len, len);
7976 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7977 MGMT_STATUS_INVALID_PARAMS);
7978 }
7979
7980 bt_dev_dbg(hdev, "param_count %u", param_count);
7981
7982 hci_dev_lock(hdev);
7983
7984 if (param_count > 1)
7985 hci_conn_params_clear_disabled(hdev);
7986
7987 for (i = 0; i < param_count; i++) {
7988 struct mgmt_conn_param *param = &cp->params[i];
7989 struct hci_conn_params *hci_param;
7990 u16 min, max, latency, timeout;
7991 bool update = false;
7992 u8 addr_type;
7993
7994 		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
7995 param->addr.type);
7996
7997 if (param->addr.type == BDADDR_LE_PUBLIC) {
7998 addr_type = ADDR_LE_DEV_PUBLIC;
7999 } else if (param->addr.type == BDADDR_LE_RANDOM) {
8000 addr_type = ADDR_LE_DEV_RANDOM;
8001 } else {
8002 bt_dev_err(hdev, "ignoring invalid connection parameters");
8003 continue;
8004 }
8005
8006 min = le16_to_cpu(param->min_interval);
8007 max = le16_to_cpu(param->max_interval);
8008 latency = le16_to_cpu(param->latency);
8009 timeout = le16_to_cpu(param->timeout);
8010
8011 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
8012 min, max, latency, timeout);
8013
8014 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
8015 bt_dev_err(hdev, "ignoring invalid connection parameters");
8016 continue;
8017 }
8018
8019 		/* Detect when the load is for an existing set of parameters
8020 		 * and, if so, attempt to trigger the connection update
8021 		 * procedure.
8022 if (!i && param_count == 1) {
8023 hci_param = hci_conn_params_lookup(hdev,
8024 							   &param->addr.bdaddr,
8025 addr_type);
8026 if (hci_param)
8027 update = true;
8028 else
8029 hci_conn_params_clear_disabled(hdev);
8030 }
8031
8032 		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
8033 addr_type);
8034 if (!hci_param) {
8035 bt_dev_err(hdev, "failed to add connection parameters");
8036 continue;
8037 }
8038
8039 hci_param->conn_min_interval = min;
8040 hci_param->conn_max_interval = max;
8041 hci_param->conn_latency = latency;
8042 hci_param->supervision_timeout = timeout;
8043
8044 /* Check if we need to trigger a connection update */
8045 if (update) {
8046 struct hci_conn *conn;
8047
8048 			/* Look up an existing connection as central and check
8049 			 * whether the parameters match; if they don't, trigger
8050 			 * a connection update.
8051 			 */
8052 conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
8053 addr_type);
8054 if (conn && conn->role == HCI_ROLE_MASTER &&
8055 (conn->le_conn_min_interval != min ||
8056 conn->le_conn_max_interval != max ||
8057 conn->le_conn_latency != latency ||
8058 conn->le_supv_timeout != timeout))
8059 hci_cmd_sync_queue(hdev, conn_update_sync,
8060 hci_param, NULL);
8061 }
8062 }
8063
8064 hci_dev_unlock(hdev);
8065
8066 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
8067 NULL, 0);
8068 }
8069
8070 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
8071 void *data, u16 len)
8072 {
8073 struct mgmt_cp_set_external_config *cp = data;
8074 bool changed;
8075 int err;
8076
8077 bt_dev_dbg(hdev, "sock %p", sk);
8078
8079 if (hdev_is_powered(hdev))
8080 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8081 MGMT_STATUS_REJECTED);
8082
8083 if (cp->config != 0x00 && cp->config != 0x01)
8084 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8085 MGMT_STATUS_INVALID_PARAMS);
8086
8087 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
8088 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8089 MGMT_STATUS_NOT_SUPPORTED);
8090
8091 hci_dev_lock(hdev);
8092
8093 if (cp->config)
8094 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
8095 else
8096 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
8097
8098 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
8099 if (err < 0)
8100 goto unlock;
8101
8102 if (!changed)
8103 goto unlock;
8104
8105 err = new_options(hdev, sk);
8106
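	/* The index no longer matches the actual configuration state, so
	 * move the controller between the configured and unconfigured
	 * index lists.
	 */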
8107 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
8108 mgmt_index_removed(hdev);
8109
8110 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
8111 hci_dev_set_flag(hdev, HCI_CONFIG);
8112 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8113
8114 queue_work(hdev->req_workqueue, &hdev->power_on);
8115 } else {
8116 set_bit(HCI_RAW, &hdev->flags);
8117 mgmt_index_added(hdev);
8118 }
8119 }
8120
8121 unlock:
8122 hci_dev_unlock(hdev);
8123 return err;
8124 }
8125
8126 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
8127 void *data, u16 len)
8128 {
8129 struct mgmt_cp_set_public_address *cp = data;
8130 bool changed;
8131 int err;
8132
8133 bt_dev_dbg(hdev, "sock %p", sk);
8134
8135 if (hdev_is_powered(hdev))
8136 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8137 MGMT_STATUS_REJECTED);
8138
8139 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
8140 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8141 MGMT_STATUS_INVALID_PARAMS);
8142
8143 if (!hdev->set_bdaddr)
8144 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8145 MGMT_STATUS_NOT_SUPPORTED);
8146
8147 hci_dev_lock(hdev);
8148
8149 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
8150 bacpy(&hdev->public_addr, &cp->bdaddr);
8151
8152 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8153 if (err < 0)
8154 goto unlock;
8155
8156 if (!changed)
8157 goto unlock;
8158
8159 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8160 err = new_options(hdev, sk);
8161
8162 if (is_configured(hdev)) {
8163 mgmt_index_removed(hdev);
8164
8165 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8166
8167 hci_dev_set_flag(hdev, HCI_CONFIG);
8168 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8169
8170 queue_work(hdev->req_workqueue, &hdev->power_on);
8171 }
8172
8173 unlock:
8174 hci_dev_unlock(hdev);
8175 return err;
8176 }
8177
8178 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8179 int err)
8180 {
8181 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8182 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8183 u8 *h192, *r192, *h256, *r256;
8184 struct mgmt_pending_cmd *cmd = data;
8185 struct sk_buff *skb = cmd->skb;
8186 u8 status = mgmt_status(err);
8187 u16 eir_len;
8188
8189 if (err == -ECANCELED ||
8190 cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
8191 return;
8192
8193 if (!status) {
8194 if (!skb)
8195 status = MGMT_STATUS_FAILED;
8196 else if (IS_ERR(skb))
8197 status = mgmt_status(PTR_ERR(skb));
8198 else
8199 status = mgmt_status(skb->data[0]);
8200 }
8201
8202 bt_dev_dbg(hdev, "status %u", status);
8203
8204 mgmt_cp = cmd->param;
8205
8206 if (status) {
8207 status = mgmt_status(status);
8208 eir_len = 0;
8209
8210 h192 = NULL;
8211 r192 = NULL;
8212 h256 = NULL;
8213 r256 = NULL;
8214 } else if (!bredr_sc_enabled(hdev)) {
8215 struct hci_rp_read_local_oob_data *rp;
8216
8217 if (skb->len != sizeof(*rp)) {
8218 status = MGMT_STATUS_FAILED;
8219 eir_len = 0;
8220 } else {
8221 status = MGMT_STATUS_SUCCESS;
8222 rp = (void *)skb->data;
8223
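			/* Class of Device (3 bytes plus 2-byte header) and the
			 * P-192 hash and randomizer (16 bytes plus 2-byte
			 * header each).
			 */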
8224 eir_len = 5 + 18 + 18;
8225 h192 = rp->hash;
8226 r192 = rp->rand;
8227 h256 = NULL;
8228 r256 = NULL;
8229 }
8230 } else {
8231 struct hci_rp_read_local_oob_ext_data *rp;
8232
8233 if (skb->len != sizeof(*rp)) {
8234 status = MGMT_STATUS_FAILED;
8235 eir_len = 0;
8236 } else {
8237 status = MGMT_STATUS_SUCCESS;
8238 rp = (void *)skb->data;
8239
8240 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8241 eir_len = 5 + 18 + 18;
8242 h192 = NULL;
8243 r192 = NULL;
8244 } else {
8245 eir_len = 5 + 18 + 18 + 18 + 18;
8246 h192 = rp->hash192;
8247 r192 = rp->rand192;
8248 }
8249
8250 h256 = rp->hash256;
8251 r256 = rp->rand256;
8252 }
8253 }
8254
8255 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8256 if (!mgmt_rp)
8257 goto done;
8258
8259 if (eir_len == 0)
8260 goto send_rsp;
8261
8262 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8263 hdev->dev_class, 3);
8264
8265 if (h192 && r192) {
8266 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8267 EIR_SSP_HASH_C192, h192, 16);
8268 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8269 EIR_SSP_RAND_R192, r192, 16);
8270 }
8271
8272 if (h256 && r256) {
8273 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8274 EIR_SSP_HASH_C256, h256, 16);
8275 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8276 EIR_SSP_RAND_R256, r256, 16);
8277 }
8278
8279 send_rsp:
8280 mgmt_rp->type = mgmt_cp->type;
8281 mgmt_rp->eir_len = cpu_to_le16(eir_len);
8282
8283 err = mgmt_cmd_complete(cmd->sk, hdev->id,
8284 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8285 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8286 if (err < 0 || status)
8287 goto done;
8288
8289 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8290
8291 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8292 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8293 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8294 done:
8295 if (skb && !IS_ERR(skb))
8296 kfree_skb(skb);
8297
8298 kfree(mgmt_rp);
8299 mgmt_pending_remove(cmd);
8300 }
8301
8302 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8303 struct mgmt_cp_read_local_oob_ext_data *cp)
8304 {
8305 struct mgmt_pending_cmd *cmd;
8306 int err;
8307
8308 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8309 cp, sizeof(*cp));
8310 if (!cmd)
8311 return -ENOMEM;
8312
8313 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8314 read_local_oob_ext_data_complete);
8315
8316 if (err < 0) {
8317 mgmt_pending_remove(cmd);
8318 return err;
8319 }
8320
8321 return 0;
8322 }
8323
8324 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8325 void *data, u16 data_len)
8326 {
8327 struct mgmt_cp_read_local_oob_ext_data *cp = data;
8328 struct mgmt_rp_read_local_oob_ext_data *rp;
8329 size_t rp_len;
8330 u16 eir_len;
8331 u8 status, flags, role, addr[7], hash[16], rand[16];
8332 int err;
8333
8334 bt_dev_dbg(hdev, "sock %p", sk);
8335
8336 if (hdev_is_powered(hdev)) {
8337 switch (cp->type) {
8338 case BIT(BDADDR_BREDR):
8339 status = mgmt_bredr_support(hdev);
8340 if (status)
8341 eir_len = 0;
8342 else
8343 eir_len = 5;
8344 break;
8345 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
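			/* LE address (7 bytes), role (1 byte), SC confirm and
			 * random values (16 bytes each) and flags (1 byte),
			 * each with a 2-byte EIR header.
			 */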
8346 status = mgmt_le_support(hdev);
8347 if (status)
8348 eir_len = 0;
8349 else
8350 eir_len = 9 + 3 + 18 + 18 + 3;
8351 break;
8352 default:
8353 status = MGMT_STATUS_INVALID_PARAMS;
8354 eir_len = 0;
8355 break;
8356 }
8357 } else {
8358 status = MGMT_STATUS_NOT_POWERED;
8359 eir_len = 0;
8360 }
8361
8362 rp_len = sizeof(*rp) + eir_len;
8363 rp = kmalloc(rp_len, GFP_ATOMIC);
8364 if (!rp)
8365 return -ENOMEM;
8366
8367 if (!status && !lmp_ssp_capable(hdev)) {
8368 status = MGMT_STATUS_NOT_SUPPORTED;
8369 eir_len = 0;
8370 }
8371
8372 if (status)
8373 goto complete;
8374
8375 hci_dev_lock(hdev);
8376
8377 eir_len = 0;
8378 switch (cp->type) {
8379 case BIT(BDADDR_BREDR):
8380 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8381 err = read_local_ssp_oob_req(hdev, sk, cp);
8382 hci_dev_unlock(hdev);
8383 if (!err)
8384 goto done;
8385
8386 status = MGMT_STATUS_FAILED;
8387 goto complete;
8388 } else {
8389 eir_len = eir_append_data(rp->eir, eir_len,
8390 EIR_CLASS_OF_DEV,
8391 hdev->dev_class, 3);
8392 }
8393 break;
8394 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8395 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8396 smp_generate_oob(hdev, hash, rand) < 0) {
8397 hci_dev_unlock(hdev);
8398 status = MGMT_STATUS_FAILED;
8399 goto complete;
8400 }
8401
8402 /* This should return the active RPA, but since the RPA
8403 * is only programmed on demand, it is really hard to fill
8404 * this in at the moment. For now disallow retrieving
8405 * local out-of-band data when privacy is in use.
8406 *
8407 * Returning the identity address will not help here since
8408 * pairing happens before the identity resolving key is
8409 * known and thus the connection establishment happens
8410 * based on the RPA and not the identity address.
8411 */
8412 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8413 hci_dev_unlock(hdev);
8414 status = MGMT_STATUS_REJECTED;
8415 goto complete;
8416 }
8417
8418 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8419 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8420 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8421 bacmp(&hdev->static_addr, BDADDR_ANY))) {
8422 memcpy(addr, &hdev->static_addr, 6);
8423 addr[6] = 0x01;
8424 } else {
8425 memcpy(addr, &hdev->bdaddr, 6);
8426 addr[6] = 0x00;
8427 }
8428
8429 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8430 addr, sizeof(addr));
8431
8432 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8433 role = 0x02;
8434 else
8435 role = 0x01;
8436
8437 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8438 &role, sizeof(role));
8439
8440 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8441 eir_len = eir_append_data(rp->eir, eir_len,
8442 EIR_LE_SC_CONFIRM,
8443 hash, sizeof(hash));
8444
8445 eir_len = eir_append_data(rp->eir, eir_len,
8446 EIR_LE_SC_RANDOM,
8447 rand, sizeof(rand));
8448 }
8449
8450 flags = mgmt_get_adv_discov_flags(hdev);
8451
8452 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8453 flags |= LE_AD_NO_BREDR;
8454
8455 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8456 &flags, sizeof(flags));
8457 break;
8458 }
8459
8460 hci_dev_unlock(hdev);
8461
8462 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8463
8464 status = MGMT_STATUS_SUCCESS;
8465
8466 complete:
8467 rp->type = cp->type;
8468 rp->eir_len = cpu_to_le16(eir_len);
8469
8470 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8471 status, rp, sizeof(*rp) + eir_len);
8472 if (err < 0 || status)
8473 goto done;
8474
8475 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8476 rp, sizeof(*rp) + eir_len,
8477 HCI_MGMT_OOB_DATA_EVENTS, sk);
8478
8479 done:
8480 kfree(rp);
8481
8482 return err;
8483 }
8484
8485 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8486 {
8487 u32 flags = 0;
8488
8489 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8490 flags |= MGMT_ADV_FLAG_DISCOV;
8491 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8492 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8493 flags |= MGMT_ADV_FLAG_APPEARANCE;
8494 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8495 flags |= MGMT_ADV_PARAM_DURATION;
8496 flags |= MGMT_ADV_PARAM_TIMEOUT;
8497 flags |= MGMT_ADV_PARAM_INTERVALS;
8498 flags |= MGMT_ADV_PARAM_TX_POWER;
8499 flags |= MGMT_ADV_PARAM_SCAN_RSP;
8500
8501 	/* With extended advertising, the TX_POWER returned from Set Adv Param
8502 	 * will always be valid.
8503 	 */
8504 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8505 flags |= MGMT_ADV_FLAG_TX_POWER;
8506
8507 if (ext_adv_capable(hdev)) {
8508 flags |= MGMT_ADV_FLAG_SEC_1M;
8509 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8510 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8511
8512 if (le_2m_capable(hdev))
8513 flags |= MGMT_ADV_FLAG_SEC_2M;
8514
8515 if (le_coded_capable(hdev))
8516 flags |= MGMT_ADV_FLAG_SEC_CODED;
8517 }
8518
8519 return flags;
8520 }
8521
8522 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8523 void *data, u16 data_len)
8524 {
8525 struct mgmt_rp_read_adv_features *rp;
8526 size_t rp_len;
8527 int err;
8528 struct adv_info *adv_instance;
8529 u32 supported_flags;
8530 u8 *instance;
8531
8532 bt_dev_dbg(hdev, "sock %p", sk);
8533
8534 if (!lmp_le_capable(hdev))
8535 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8536 MGMT_STATUS_REJECTED);
8537
8538 hci_dev_lock(hdev);
8539
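	/* One byte of instance identifier is reserved per advertising
	 * instance.
	 */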
8540 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8541 rp = kmalloc(rp_len, GFP_ATOMIC);
8542 if (!rp) {
8543 hci_dev_unlock(hdev);
8544 return -ENOMEM;
8545 }
8546
8547 supported_flags = get_supported_adv_flags(hdev);
8548
8549 rp->supported_flags = cpu_to_le32(supported_flags);
8550 rp->max_adv_data_len = max_adv_len(hdev);
8551 rp->max_scan_rsp_len = max_adv_len(hdev);
8552 rp->max_instances = hdev->le_num_of_adv_sets;
8553 rp->num_instances = hdev->adv_instance_cnt;
8554
8555 instance = rp->instance;
8556 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8557 /* Only instances 1-le_num_of_adv_sets are externally visible */
8558 if (adv_instance->instance <= hdev->adv_instance_cnt) {
8559 *instance = adv_instance->instance;
8560 instance++;
8561 } else {
8562 rp->num_instances--;
8563 rp_len--;
8564 }
8565 }
8566
8567 hci_dev_unlock(hdev);
8568
8569 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8570 MGMT_STATUS_SUCCESS, rp, rp_len);
8571
8572 kfree(rp);
8573
8574 return err;
8575 }
8576
8577 static u8 calculate_name_len(struct hci_dev *hdev)
8578 {
8579 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8580
8581 return eir_append_local_name(hdev, buf, 0);
8582 }
8583
8584 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8585 bool is_adv_data)
8586 {
8587 u8 max_len = max_adv_len(hdev);
8588
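	/* Every field reserved by a flag costs its data length plus a
	 * 2-byte length/type header: flags 1+2, TX power 1+2 and
	 * appearance 2+2 bytes.
	 */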
8589 if (is_adv_data) {
8590 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8591 MGMT_ADV_FLAG_LIMITED_DISCOV |
8592 MGMT_ADV_FLAG_MANAGED_FLAGS))
8593 max_len -= 3;
8594
8595 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8596 max_len -= 3;
8597 } else {
8598 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8599 max_len -= calculate_name_len(hdev);
8600
8601 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8602 max_len -= 4;
8603 }
8604
8605 return max_len;
8606 }
8607
8608 static bool flags_managed(u32 adv_flags)
8609 {
8610 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8611 MGMT_ADV_FLAG_LIMITED_DISCOV |
8612 MGMT_ADV_FLAG_MANAGED_FLAGS);
8613 }
8614
8615 static bool tx_power_managed(u32 adv_flags)
8616 {
8617 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8618 }
8619
8620 static bool name_managed(u32 adv_flags)
8621 {
8622 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8623 }
8624
8625 static bool appearance_managed(u32 adv_flags)
8626 {
8627 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8628 }
8629
8630 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8631 u8 len, bool is_adv_data)
8632 {
8633 int i, cur_len;
8634 u8 max_len;
8635
8636 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8637
8638 if (len > max_len)
8639 return false;
8640
8641 /* Make sure that the data is correctly formatted. */
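	/* Each AD structure consists of a length byte, a type byte and
	 * (length - 1) bytes of data.
	 */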
8642 for (i = 0; i < len; i += (cur_len + 1)) {
8643 cur_len = data[i];
8644
8645 if (!cur_len)
8646 continue;
8647
8648 if (data[i + 1] == EIR_FLAGS &&
8649 (!is_adv_data || flags_managed(adv_flags)))
8650 return false;
8651
8652 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8653 return false;
8654
8655 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8656 return false;
8657
8658 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8659 return false;
8660
8661 if (data[i + 1] == EIR_APPEARANCE &&
8662 appearance_managed(adv_flags))
8663 return false;
8664
8665 /* If the current field length would exceed the total data
8666 * length, then it's invalid.
8667 */
8668 if (i + cur_len >= len)
8669 return false;
8670 }
8671
8672 return true;
8673 }
8674
8675 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8676 {
8677 u32 supported_flags, phy_flags;
8678
8679 /* The current implementation only supports a subset of the specified
8680 * flags. Also need to check mutual exclusiveness of sec flags.
8681 */
8682 supported_flags = get_supported_adv_flags(hdev);
8683 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
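	/* phy_flags & -phy_flags isolates the lowest set bit, so the XOR
	 * is non-zero whenever more than one secondary PHY flag is set.
	 */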
8684 if (adv_flags & ~supported_flags ||
8685 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8686 return false;
8687
8688 return true;
8689 }
8690
8691 static bool adv_busy(struct hci_dev *hdev)
8692 {
8693 return pending_find(MGMT_OP_SET_LE, hdev);
8694 }
8695
8696 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8697 int err)
8698 {
8699 struct adv_info *adv, *n;
8700
8701 bt_dev_dbg(hdev, "err %d", err);
8702
8703 hci_dev_lock(hdev);
8704
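	/* Instances still marked as pending belong to this request: on
	 * success simply commit them, on failure remove them and notify
	 * userspace.
	 */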
8705 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8706 u8 instance;
8707
8708 if (!adv->pending)
8709 continue;
8710
8711 if (!err) {
8712 adv->pending = false;
8713 continue;
8714 }
8715
8716 instance = adv->instance;
8717
8718 if (hdev->cur_adv_instance == instance)
8719 cancel_adv_timeout(hdev);
8720
8721 hci_remove_adv_instance(hdev, instance);
8722 mgmt_advertising_removed(sk, hdev, instance);
8723 }
8724
8725 hci_dev_unlock(hdev);
8726 }
8727
8728 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8729 {
8730 struct mgmt_pending_cmd *cmd = data;
8731 struct mgmt_cp_add_advertising *cp = cmd->param;
8732 struct mgmt_rp_add_advertising rp;
8733
8734 memset(&rp, 0, sizeof(rp));
8735
8736 rp.instance = cp->instance;
8737
8738 if (err)
8739 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8740 mgmt_status(err));
8741 else
8742 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8743 mgmt_status(err), &rp, sizeof(rp));
8744
8745 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8746
8747 mgmt_pending_free(cmd);
8748 }
8749
8750 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8751 {
8752 struct mgmt_pending_cmd *cmd = data;
8753 struct mgmt_cp_add_advertising *cp = cmd->param;
8754
8755 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8756 }
8757
8758 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8759 void *data, u16 data_len)
8760 {
8761 struct mgmt_cp_add_advertising *cp = data;
8762 struct mgmt_rp_add_advertising rp;
8763 u32 flags;
8764 u8 status;
8765 u16 timeout, duration;
8766 unsigned int prev_instance_cnt;
8767 u8 schedule_instance = 0;
8768 struct adv_info *adv, *next_instance;
8769 int err;
8770 struct mgmt_pending_cmd *cmd;
8771
8772 bt_dev_dbg(hdev, "sock %p", sk);
8773
8774 status = mgmt_le_support(hdev);
8775 if (status)
8776 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8777 status);
8778
8779 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8780 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8781 MGMT_STATUS_INVALID_PARAMS);
8782
8783 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8784 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8785 MGMT_STATUS_INVALID_PARAMS);
8786
8787 flags = __le32_to_cpu(cp->flags);
8788 timeout = __le16_to_cpu(cp->timeout);
8789 duration = __le16_to_cpu(cp->duration);
8790
8791 if (!requested_adv_flags_are_valid(hdev, flags))
8792 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8793 MGMT_STATUS_INVALID_PARAMS);
8794
8795 hci_dev_lock(hdev);
8796
8797 if (timeout && !hdev_is_powered(hdev)) {
8798 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8799 MGMT_STATUS_REJECTED);
8800 goto unlock;
8801 }
8802
8803 if (adv_busy(hdev)) {
8804 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8805 MGMT_STATUS_BUSY);
8806 goto unlock;
8807 }
8808
8809 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8810 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8811 cp->scan_rsp_len, false)) {
8812 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8813 MGMT_STATUS_INVALID_PARAMS);
8814 goto unlock;
8815 }
8816
8817 prev_instance_cnt = hdev->adv_instance_cnt;
8818
8819 adv = hci_add_adv_instance(hdev, cp->instance, flags,
8820 cp->adv_data_len, cp->data,
8821 cp->scan_rsp_len,
8822 cp->data + cp->adv_data_len,
8823 timeout, duration,
8824 HCI_ADV_TX_POWER_NO_PREFERENCE,
8825 hdev->le_adv_min_interval,
8826 hdev->le_adv_max_interval, 0);
8827 if (IS_ERR(adv)) {
8828 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8829 MGMT_STATUS_FAILED);
8830 goto unlock;
8831 }
8832
8833 /* Only trigger an advertising added event if a new instance was
8834 * actually added.
8835 */
8836 if (hdev->adv_instance_cnt > prev_instance_cnt)
8837 mgmt_advertising_added(sk, hdev, cp->instance);
8838
8839 if (hdev->cur_adv_instance == cp->instance) {
8840 /* If the currently advertised instance is being changed then
8841 * cancel the current advertising and schedule the next
8842 * instance. If there is only one instance then the overridden
8843 * advertising data will be visible right away.
8844 */
8845 cancel_adv_timeout(hdev);
8846
8847 next_instance = hci_get_next_instance(hdev, cp->instance);
8848 if (next_instance)
8849 schedule_instance = next_instance->instance;
8850 } else if (!hdev->adv_instance_timeout) {
8851 /* Immediately advertise the new instance if no other
8852 * instance is currently being advertised.
8853 */
8854 schedule_instance = cp->instance;
8855 }
8856
8857 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8858 * there is no instance to be advertised then we have no HCI
8859 * communication to make. Simply return.
8860 */
8861 if (!hdev_is_powered(hdev) ||
8862 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8863 !schedule_instance) {
8864 rp.instance = cp->instance;
8865 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8866 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8867 goto unlock;
8868 }
8869
8870 /* We're good to go, update advertising data, parameters, and start
8871 * advertising.
8872 */
8873 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8874 data_len);
8875 if (!cmd) {
8876 err = -ENOMEM;
8877 goto unlock;
8878 }
8879
8880 cp->instance = schedule_instance;
8881
8882 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8883 add_advertising_complete);
8884 if (err < 0)
8885 mgmt_pending_free(cmd);
8886
8887 unlock:
8888 hci_dev_unlock(hdev);
8889
8890 return err;
8891 }
8892
8893 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8894 int err)
8895 {
8896 struct mgmt_pending_cmd *cmd = data;
8897 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8898 struct mgmt_rp_add_ext_adv_params rp;
8899 struct adv_info *adv;
8900 u32 flags;
8901
8902 BT_DBG("%s", hdev->name);
8903
8904 hci_dev_lock(hdev);
8905
8906 adv = hci_find_adv_instance(hdev, cp->instance);
8907 if (!adv)
8908 goto unlock;
8909
8910 rp.instance = cp->instance;
8911 rp.tx_power = adv->tx_power;
8912
8913 /* While we're at it, inform userspace of the available space for this
8914 * advertisement, given the flags that will be used.
8915 */
8916 flags = __le32_to_cpu(cp->flags);
8917 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8918 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8919
8920 if (err) {
8921 		/* If this advertisement was previously advertising and we
8922 		 * failed to update it, signal that it has been removed and
8923 		 * delete its structure.
8924 		 */
8925 if (!adv->pending)
8926 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8927
8928 hci_remove_adv_instance(hdev, cp->instance);
8929
8930 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8931 mgmt_status(err));
8932 } else {
8933 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8934 mgmt_status(err), &rp, sizeof(rp));
8935 }
8936
8937 unlock:
8938 mgmt_pending_free(cmd);
8939
8940 hci_dev_unlock(hdev);
8941 }
8942
8943 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8944 {
8945 struct mgmt_pending_cmd *cmd = data;
8946 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8947
8948 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8949 }
8950
8951 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8952 void *data, u16 data_len)
8953 {
8954 struct mgmt_cp_add_ext_adv_params *cp = data;
8955 struct mgmt_rp_add_ext_adv_params rp;
8956 struct mgmt_pending_cmd *cmd = NULL;
8957 struct adv_info *adv;
8958 u32 flags, min_interval, max_interval;
8959 u16 timeout, duration;
8960 u8 status;
8961 s8 tx_power;
8962 int err;
8963
8964 BT_DBG("%s", hdev->name);
8965
8966 status = mgmt_le_support(hdev);
8967 if (status)
8968 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8969 status);
8970
8971 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8972 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8973 MGMT_STATUS_INVALID_PARAMS);
8974
8975 /* The purpose of breaking add_advertising into two separate MGMT calls
8976 * for params and data is to allow more parameters to be added to this
8977 * structure in the future. For this reason, we verify that we have the
8978 	 * bare minimum structure that was defined when the interface was
	 * introduced. Any
8979 * extra parameters we don't know about will be ignored in this request.
8980 */
8981 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8982 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8983 MGMT_STATUS_INVALID_PARAMS);
8984
8985 flags = __le32_to_cpu(cp->flags);
8986
8987 if (!requested_adv_flags_are_valid(hdev, flags))
8988 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8989 MGMT_STATUS_INVALID_PARAMS);
8990
8991 hci_dev_lock(hdev);
8992
8993 /* In new interface, we require that we are powered to register */
8994 if (!hdev_is_powered(hdev)) {
8995 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8996 MGMT_STATUS_REJECTED);
8997 goto unlock;
8998 }
8999
9000 if (adv_busy(hdev)) {
9001 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9002 MGMT_STATUS_BUSY);
9003 goto unlock;
9004 }
9005
9006 /* Parse defined parameters from request, use defaults otherwise */
9007 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
9008 __le16_to_cpu(cp->timeout) : 0;
9009
9010 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
9011 __le16_to_cpu(cp->duration) :
9012 hdev->def_multi_adv_rotation_duration;
9013
9014 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
9015 __le32_to_cpu(cp->min_interval) :
9016 hdev->le_adv_min_interval;
9017
9018 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
9019 __le32_to_cpu(cp->max_interval) :
9020 hdev->le_adv_max_interval;
9021
9022 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
9023 cp->tx_power :
9024 HCI_ADV_TX_POWER_NO_PREFERENCE;
9025
9026 /* Create advertising instance with no advertising or response data */
9027 adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
9028 timeout, duration, tx_power, min_interval,
9029 max_interval, 0);
9030
9031 if (IS_ERR(adv)) {
9032 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9033 MGMT_STATUS_FAILED);
9034 goto unlock;
9035 }
9036
9037 /* Submit request for advertising params if ext adv available */
9038 if (ext_adv_capable(hdev)) {
9039 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
9040 data, data_len);
9041 if (!cmd) {
9042 err = -ENOMEM;
9043 hci_remove_adv_instance(hdev, cp->instance);
9044 goto unlock;
9045 }
9046
9047 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
9048 add_ext_adv_params_complete);
9049 if (err < 0)
9050 mgmt_pending_free(cmd);
9051 } else {
9052 rp.instance = cp->instance;
9053 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
9054 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9055 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9056 err = mgmt_cmd_complete(sk, hdev->id,
9057 MGMT_OP_ADD_EXT_ADV_PARAMS,
9058 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9059 }
9060
9061 unlock:
9062 hci_dev_unlock(hdev);
9063
9064 return err;
9065 }
9066
9067 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
9068 {
9069 struct mgmt_pending_cmd *cmd = data;
9070 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
9071 struct mgmt_rp_add_advertising rp;
9072
9073 add_adv_complete(hdev, cmd->sk, cp->instance, err);
9074
9075 memset(&rp, 0, sizeof(rp));
9076
9077 rp.instance = cp->instance;
9078
9079 if (err)
9080 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9081 mgmt_status(err));
9082 else
9083 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9084 mgmt_status(err), &rp, sizeof(rp));
9085
9086 mgmt_pending_free(cmd);
9087 }
9088
9089 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
9090 {
9091 struct mgmt_pending_cmd *cmd = data;
9092 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
9093 int err;
9094
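	/* With extended advertising the data, scan response and enable are
	 * programmed directly; legacy controllers fall back to instance
	 * scheduling.
	 */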
9095 if (ext_adv_capable(hdev)) {
9096 err = hci_update_adv_data_sync(hdev, cp->instance);
9097 if (err)
9098 return err;
9099
9100 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
9101 if (err)
9102 return err;
9103
9104 return hci_enable_ext_advertising_sync(hdev, cp->instance);
9105 }
9106
9107 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
9108 }
9109
9110 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
9111 u16 data_len)
9112 {
9113 struct mgmt_cp_add_ext_adv_data *cp = data;
9114 struct mgmt_rp_add_ext_adv_data rp;
9115 u8 schedule_instance = 0;
9116 struct adv_info *next_instance;
9117 struct adv_info *adv_instance;
9118 int err = 0;
9119 struct mgmt_pending_cmd *cmd;
9120
9121 BT_DBG("%s", hdev->name);
9122
9123 hci_dev_lock(hdev);
9124
9125 adv_instance = hci_find_adv_instance(hdev, cp->instance);
9126
9127 if (!adv_instance) {
9128 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9129 MGMT_STATUS_INVALID_PARAMS);
9130 goto unlock;
9131 }
9132
9133 /* In new interface, we require that we are powered to register */
9134 if (!hdev_is_powered(hdev)) {
9135 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9136 MGMT_STATUS_REJECTED);
9137 goto clear_new_instance;
9138 }
9139
9140 if (adv_busy(hdev)) {
9141 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9142 MGMT_STATUS_BUSY);
9143 goto clear_new_instance;
9144 }
9145
9146 /* Validate new data */
9147 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
9148 cp->adv_data_len, true) ||
9149 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
9150 cp->adv_data_len, cp->scan_rsp_len, false)) {
9151 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9152 MGMT_STATUS_INVALID_PARAMS);
9153 goto clear_new_instance;
9154 }
9155
9156 /* Set the data in the advertising instance */
9157 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
9158 cp->data, cp->scan_rsp_len,
9159 cp->data + cp->adv_data_len);
9160
9161 /* If using software rotation, determine next instance to use */
9162 if (hdev->cur_adv_instance == cp->instance) {
9163 /* If the currently advertised instance is being changed
9164 * then cancel the current advertising and schedule the
9165 * next instance. If there is only one instance then the
9166 * overridden advertising data will be visible right
9167 		 * away.
9168 */
9169 cancel_adv_timeout(hdev);
9170
9171 next_instance = hci_get_next_instance(hdev, cp->instance);
9172 if (next_instance)
9173 schedule_instance = next_instance->instance;
9174 } else if (!hdev->adv_instance_timeout) {
9175 /* Immediately advertise the new instance if no other
9176 * instance is currently being advertised.
9177 */
9178 schedule_instance = cp->instance;
9179 }
9180
9181 /* If the HCI_ADVERTISING flag is set or there is no instance to
9182 * be advertised then we have no HCI communication to make.
9183 * Simply return.
9184 */
9185 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
9186 if (adv_instance->pending) {
9187 mgmt_advertising_added(sk, hdev, cp->instance);
9188 adv_instance->pending = false;
9189 }
9190 rp.instance = cp->instance;
9191 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9192 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9193 goto unlock;
9194 }
9195
9196 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9197 data_len);
9198 if (!cmd) {
9199 err = -ENOMEM;
9200 goto clear_new_instance;
9201 }
9202
9203 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
9204 add_ext_adv_data_complete);
9205 if (err < 0) {
9206 mgmt_pending_free(cmd);
9207 goto clear_new_instance;
9208 }
9209
9210 /* We were successful in updating data, so trigger advertising_added
9211 * event if this is an instance that wasn't previously advertising. If
9212 * a failure occurs in the requests we initiated, we will remove the
9213 * instance again in add_advertising_complete
9214 */
9215 if (adv_instance->pending)
9216 mgmt_advertising_added(sk, hdev, cp->instance);
9217
9218 goto unlock;
9219
9220 clear_new_instance:
9221 hci_remove_adv_instance(hdev, cp->instance);
9222
9223 unlock:
9224 hci_dev_unlock(hdev);
9225
9226 return err;
9227 }
9228
9229 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9230 int err)
9231 {
9232 struct mgmt_pending_cmd *cmd = data;
9233 struct mgmt_cp_remove_advertising *cp = cmd->param;
9234 struct mgmt_rp_remove_advertising rp;
9235
9236 bt_dev_dbg(hdev, "err %d", err);
9237
9238 memset(&rp, 0, sizeof(rp));
9239 rp.instance = cp->instance;
9240
9241 if (err)
9242 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9243 mgmt_status(err));
9244 else
9245 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9246 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9247
9248 mgmt_pending_free(cmd);
9249 }
9250
9251 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9252 {
9253 struct mgmt_pending_cmd *cmd = data;
9254 struct mgmt_cp_remove_advertising *cp = cmd->param;
9255 int err;
9256
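	/* An instance of 0x00 removes all advertising instances. */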
9257 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9258 if (err)
9259 return err;
9260
9261 if (list_empty(&hdev->adv_instances))
9262 err = hci_disable_advertising_sync(hdev);
9263
9264 return err;
9265 }
9266
9267 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9268 void *data, u16 data_len)
9269 {
9270 struct mgmt_cp_remove_advertising *cp = data;
9271 struct mgmt_pending_cmd *cmd;
9272 int err;
9273
9274 bt_dev_dbg(hdev, "sock %p", sk);
9275
9276 hci_dev_lock(hdev);
9277
9278 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9279 err = mgmt_cmd_status(sk, hdev->id,
9280 MGMT_OP_REMOVE_ADVERTISING,
9281 MGMT_STATUS_INVALID_PARAMS);
9282 goto unlock;
9283 }
9284
9285 if (pending_find(MGMT_OP_SET_LE, hdev)) {
9286 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9287 MGMT_STATUS_BUSY);
9288 goto unlock;
9289 }
9290
9291 if (list_empty(&hdev->adv_instances)) {
9292 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9293 MGMT_STATUS_INVALID_PARAMS);
9294 goto unlock;
9295 }
9296
9297 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9298 data_len);
9299 if (!cmd) {
9300 err = -ENOMEM;
9301 goto unlock;
9302 }
9303
9304 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
9305 remove_advertising_complete);
9306 if (err < 0)
9307 mgmt_pending_free(cmd);
9308
9309 unlock:
9310 hci_dev_unlock(hdev);
9311
9312 return err;
9313 }
9314
9315 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9316 void *data, u16 data_len)
9317 {
9318 struct mgmt_cp_get_adv_size_info *cp = data;
9319 struct mgmt_rp_get_adv_size_info rp;
9320 u32 flags, supported_flags;
9321
9322 bt_dev_dbg(hdev, "sock %p", sk);
9323
9324 if (!lmp_le_capable(hdev))
9325 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9326 MGMT_STATUS_REJECTED);
9327
9328 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9329 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9330 MGMT_STATUS_INVALID_PARAMS);
9331
9332 flags = __le32_to_cpu(cp->flags);
9333
9334 /* The current implementation only supports a subset of the specified
9335 * flags.
9336 */
9337 supported_flags = get_supported_adv_flags(hdev);
9338 if (flags & ~supported_flags)
9339 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9340 MGMT_STATUS_INVALID_PARAMS);
9341
9342 rp.instance = cp->instance;
9343 rp.flags = cp->flags;
9344 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9345 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9346
9347 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9348 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9349 }
9350
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,         MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,      MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info, MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,          MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,        MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,     MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor, MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
	{ mgmt_hci_cmd_sync,       MGMT_HCI_CMD_SYNC_SIZE, HCI_MGMT_VAR_LEN },
};

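/* Index events come in two generations: the legacy (Unconfigured) Index
 * Added/Removed events, and the Extended Index events that additionally
 * carry a type field (0x00 for a configured controller, 0x01 for an
 * unconfigured one) plus the transport bus, so a single listener can
 * tell the cases apart.
 */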
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}

void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
}

void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}

void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let's use the
	 * appropriate INVALID_INDEX status. Otherwise use NOT_POWERED. We
	 * cover both scenarios here since later in mgmt_index_removed() any
	 * hci_conn callbacks will have already been triggered, potentially
	 * causing misleading DISCONNECTED status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
	else
		match.mgmt_status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct mgmt_pending_cmd *cmd;
	u8 status;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_RESPONDER:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}

	return MGMT_LTK_UNAUTHENTICATED;
}

void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * long term keys to be stored. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
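	/* The two most significant bits of a random address encode its
	 * sub-type: 0b11 is static random, 0b01 resolvable private and
	 * 0b00 non-resolvable private. Anything other than static random
	 * therefore cannot act as an identity address.
	 */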
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * signature resolving keys to be stored. Their addresses will
	 * change the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
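	/* Same random-address sub-type check as in mgmt_new_ltk(): only a
	 * static random address (two most significant bits set) or a public
	 * address is an identity address worth storing keys against.
	 */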
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* Allocate a buffer for LE or BR/EDR advertising data */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class, sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}

static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}

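/* Report whether the controller is in the middle of powering down: either
 * HCI_POWERING_DOWN is already set, or a Set Powered command with value 0
 * is still pending completion.
 */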
bool mgmt_powering_down(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
		return true;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
					 conn->dst_type, status, true);
		return;
	}

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS,
				   NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is an HCI command related to powering the HCI
		 * dev on or off, don't send any mgmt signals.
		 */
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
			return;

		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}

static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}

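/* EIR and advertising data are a sequence of length-prefixed fields: one
 * octet of field length (covering type plus data), one octet of AD type,
 * then the data itself. 16-bit and 32-bit service UUIDs are expanded onto
 * the Bluetooth base UUID (00000000-0000-1000-8000-00805F9B34FB) at
 * octets 12-13 and 12-15 respectively before being compared against the
 * 128-bit UUIDs in the filter list.
 */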
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}

static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If an RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * an RSSI smaller than the RSSI threshold will be dropped. If the
	 * quirk is set, let it through for further processing, as we might
	 * need to restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}

void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.monitor_handle = cpu_to_le16(handle);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}

static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}

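/* This helper consumes @skb: it is either handed off to mgmt_event_skb(),
 * which releases it after delivery, or freed with kfree_skb() when no
 * event is emitted for it.
 */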
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}

static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}

void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for non-kernel-initiated discovery. For LE,
	 * the one exception is having pend_le_reports > 0, in which case
	 * we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);

			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}

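/* Remote name resolution results are delivered as a Device Found event:
 * on success the name is appended as an EIR_NAME_COMPLETE field, while on
 * failure an empty event is sent with MGMT_DEV_FOUND_NAME_REQUEST_FAILED
 * set in the flags.
 */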
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}

void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	} else {
		memset(&ev.addr, 0, sizeof(ev.addr));
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}

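/* The management interface is exposed to userspace as the HCI control
 * channel: hci_sock looks up this descriptor for HCI_CHANNEL_CONTROL
 * sockets and dispatches each incoming command frame to the matching
 * entry in mgmt_handlers[].
 */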
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};

int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}

void mgmt_cleanup(struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, true);
		} while (mesh_tx);
	}

	read_unlock(&hci_dev_list_lock);
}