1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <linux/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "smp.h"
37 #include "mgmt_util.h"
38 #include "mgmt_config.h"
39 #include "msft.h"
40 #include "eir.h"
41 #include "aosp.h"
42
43 #define MGMT_VERSION 1
44 #define MGMT_REVISION 23
45
46 static const u16 mgmt_commands[] = {
47 MGMT_OP_READ_INDEX_LIST,
48 MGMT_OP_READ_INFO,
49 MGMT_OP_SET_POWERED,
50 MGMT_OP_SET_DISCOVERABLE,
51 MGMT_OP_SET_CONNECTABLE,
52 MGMT_OP_SET_FAST_CONNECTABLE,
53 MGMT_OP_SET_BONDABLE,
54 MGMT_OP_SET_LINK_SECURITY,
55 MGMT_OP_SET_SSP,
56 MGMT_OP_SET_HS,
57 MGMT_OP_SET_LE,
58 MGMT_OP_SET_DEV_CLASS,
59 MGMT_OP_SET_LOCAL_NAME,
60 MGMT_OP_ADD_UUID,
61 MGMT_OP_REMOVE_UUID,
62 MGMT_OP_LOAD_LINK_KEYS,
63 MGMT_OP_LOAD_LONG_TERM_KEYS,
64 MGMT_OP_DISCONNECT,
65 MGMT_OP_GET_CONNECTIONS,
66 MGMT_OP_PIN_CODE_REPLY,
67 MGMT_OP_PIN_CODE_NEG_REPLY,
68 MGMT_OP_SET_IO_CAPABILITY,
69 MGMT_OP_PAIR_DEVICE,
70 MGMT_OP_CANCEL_PAIR_DEVICE,
71 MGMT_OP_UNPAIR_DEVICE,
72 MGMT_OP_USER_CONFIRM_REPLY,
73 MGMT_OP_USER_CONFIRM_NEG_REPLY,
74 MGMT_OP_USER_PASSKEY_REPLY,
75 MGMT_OP_USER_PASSKEY_NEG_REPLY,
76 MGMT_OP_READ_LOCAL_OOB_DATA,
77 MGMT_OP_ADD_REMOTE_OOB_DATA,
78 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
79 MGMT_OP_START_DISCOVERY,
80 MGMT_OP_STOP_DISCOVERY,
81 MGMT_OP_CONFIRM_NAME,
82 MGMT_OP_BLOCK_DEVICE,
83 MGMT_OP_UNBLOCK_DEVICE,
84 MGMT_OP_SET_DEVICE_ID,
85 MGMT_OP_SET_ADVERTISING,
86 MGMT_OP_SET_BREDR,
87 MGMT_OP_SET_STATIC_ADDRESS,
88 MGMT_OP_SET_SCAN_PARAMS,
89 MGMT_OP_SET_SECURE_CONN,
90 MGMT_OP_SET_DEBUG_KEYS,
91 MGMT_OP_SET_PRIVACY,
92 MGMT_OP_LOAD_IRKS,
93 MGMT_OP_GET_CONN_INFO,
94 MGMT_OP_GET_CLOCK_INFO,
95 MGMT_OP_ADD_DEVICE,
96 MGMT_OP_REMOVE_DEVICE,
97 MGMT_OP_LOAD_CONN_PARAM,
98 MGMT_OP_READ_UNCONF_INDEX_LIST,
99 MGMT_OP_READ_CONFIG_INFO,
100 MGMT_OP_SET_EXTERNAL_CONFIG,
101 MGMT_OP_SET_PUBLIC_ADDRESS,
102 MGMT_OP_START_SERVICE_DISCOVERY,
103 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
104 MGMT_OP_READ_EXT_INDEX_LIST,
105 MGMT_OP_READ_ADV_FEATURES,
106 MGMT_OP_ADD_ADVERTISING,
107 MGMT_OP_REMOVE_ADVERTISING,
108 MGMT_OP_GET_ADV_SIZE_INFO,
109 MGMT_OP_START_LIMITED_DISCOVERY,
110 MGMT_OP_READ_EXT_INFO,
111 MGMT_OP_SET_APPEARANCE,
112 MGMT_OP_GET_PHY_CONFIGURATION,
113 MGMT_OP_SET_PHY_CONFIGURATION,
114 MGMT_OP_SET_BLOCKED_KEYS,
115 MGMT_OP_SET_WIDEBAND_SPEECH,
116 MGMT_OP_READ_CONTROLLER_CAP,
117 MGMT_OP_READ_EXP_FEATURES_INFO,
118 MGMT_OP_SET_EXP_FEATURE,
119 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
120 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
121 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
122 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
123 MGMT_OP_GET_DEVICE_FLAGS,
124 MGMT_OP_SET_DEVICE_FLAGS,
125 MGMT_OP_READ_ADV_MONITOR_FEATURES,
126 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
127 MGMT_OP_REMOVE_ADV_MONITOR,
128 MGMT_OP_ADD_EXT_ADV_PARAMS,
129 MGMT_OP_ADD_EXT_ADV_DATA,
130 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
131 MGMT_OP_SET_MESH_RECEIVER,
132 MGMT_OP_MESH_READ_FEATURES,
133 MGMT_OP_MESH_SEND,
134 MGMT_OP_MESH_SEND_CANCEL,
135 MGMT_OP_HCI_CMD_SYNC,
136 };
137
138 static const u16 mgmt_events[] = {
139 MGMT_EV_CONTROLLER_ERROR,
140 MGMT_EV_INDEX_ADDED,
141 MGMT_EV_INDEX_REMOVED,
142 MGMT_EV_NEW_SETTINGS,
143 MGMT_EV_CLASS_OF_DEV_CHANGED,
144 MGMT_EV_LOCAL_NAME_CHANGED,
145 MGMT_EV_NEW_LINK_KEY,
146 MGMT_EV_NEW_LONG_TERM_KEY,
147 MGMT_EV_DEVICE_CONNECTED,
148 MGMT_EV_DEVICE_DISCONNECTED,
149 MGMT_EV_CONNECT_FAILED,
150 MGMT_EV_PIN_CODE_REQUEST,
151 MGMT_EV_USER_CONFIRM_REQUEST,
152 MGMT_EV_USER_PASSKEY_REQUEST,
153 MGMT_EV_AUTH_FAILED,
154 MGMT_EV_DEVICE_FOUND,
155 MGMT_EV_DISCOVERING,
156 MGMT_EV_DEVICE_BLOCKED,
157 MGMT_EV_DEVICE_UNBLOCKED,
158 MGMT_EV_DEVICE_UNPAIRED,
159 MGMT_EV_PASSKEY_NOTIFY,
160 MGMT_EV_NEW_IRK,
161 MGMT_EV_NEW_CSRK,
162 MGMT_EV_DEVICE_ADDED,
163 MGMT_EV_DEVICE_REMOVED,
164 MGMT_EV_NEW_CONN_PARAM,
165 MGMT_EV_UNCONF_INDEX_ADDED,
166 MGMT_EV_UNCONF_INDEX_REMOVED,
167 MGMT_EV_NEW_CONFIG_OPTIONS,
168 MGMT_EV_EXT_INDEX_ADDED,
169 MGMT_EV_EXT_INDEX_REMOVED,
170 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
171 MGMT_EV_ADVERTISING_ADDED,
172 MGMT_EV_ADVERTISING_REMOVED,
173 MGMT_EV_EXT_INFO_CHANGED,
174 MGMT_EV_PHY_CONFIGURATION_CHANGED,
175 MGMT_EV_EXP_FEATURE_CHANGED,
176 MGMT_EV_DEVICE_FLAGS_CHANGED,
177 MGMT_EV_ADV_MONITOR_ADDED,
178 MGMT_EV_ADV_MONITOR_REMOVED,
179 MGMT_EV_CONTROLLER_SUSPEND,
180 MGMT_EV_CONTROLLER_RESUME,
181 MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
182 MGMT_EV_ADV_MONITOR_DEVICE_LOST,
183 };
184
185 static const u16 mgmt_untrusted_commands[] = {
186 MGMT_OP_READ_INDEX_LIST,
187 MGMT_OP_READ_INFO,
188 MGMT_OP_READ_UNCONF_INDEX_LIST,
189 MGMT_OP_READ_CONFIG_INFO,
190 MGMT_OP_READ_EXT_INDEX_LIST,
191 MGMT_OP_READ_EXT_INFO,
192 MGMT_OP_READ_CONTROLLER_CAP,
193 MGMT_OP_READ_EXP_FEATURES_INFO,
194 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
195 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
196 };
197
198 static const u16 mgmt_untrusted_events[] = {
199 MGMT_EV_INDEX_ADDED,
200 MGMT_EV_INDEX_REMOVED,
201 MGMT_EV_NEW_SETTINGS,
202 MGMT_EV_CLASS_OF_DEV_CHANGED,
203 MGMT_EV_LOCAL_NAME_CHANGED,
204 MGMT_EV_UNCONF_INDEX_ADDED,
205 MGMT_EV_UNCONF_INDEX_REMOVED,
206 MGMT_EV_NEW_CONFIG_OPTIONS,
207 MGMT_EV_EXT_INDEX_ADDED,
208 MGMT_EV_EXT_INDEX_REMOVED,
209 MGMT_EV_EXT_INFO_CHANGED,
210 MGMT_EV_EXP_FEATURE_CHANGED,
211 };
212
213 #define CACHE_TIMEOUT secs_to_jiffies(2)
214
215 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
216 "\x00\x00\x00\x00\x00\x00\x00\x00"
217
218 /* HCI to MGMT error code conversion table */
219 static const u8 mgmt_status_table[] = {
220 MGMT_STATUS_SUCCESS,
221 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
222 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
223 MGMT_STATUS_FAILED, /* Hardware Failure */
224 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
225 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
226 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
227 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
228 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
229 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
230 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
231 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
232 MGMT_STATUS_BUSY, /* Command Disallowed */
233 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
234 MGMT_STATUS_REJECTED, /* Rejected Security */
235 MGMT_STATUS_REJECTED, /* Rejected Personal */
236 MGMT_STATUS_TIMEOUT, /* Host Timeout */
237 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
238 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
239 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
240 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
241 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
242 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
243 MGMT_STATUS_BUSY, /* Repeated Attempts */
244 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
245 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
246 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
247 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
248 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
249 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
250 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
251 MGMT_STATUS_FAILED, /* Unspecified Error */
252 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
253 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
254 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
255 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
256 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
257 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
258 MGMT_STATUS_FAILED, /* Unit Link Key Used */
259 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
260 MGMT_STATUS_TIMEOUT, /* Instant Passed */
261 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
262 MGMT_STATUS_FAILED, /* Transaction Collision */
263 MGMT_STATUS_FAILED, /* Reserved for future use */
264 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
265 MGMT_STATUS_REJECTED, /* QoS Rejected */
266 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
267 MGMT_STATUS_REJECTED, /* Insufficient Security */
268 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
269 MGMT_STATUS_FAILED, /* Reserved for future use */
270 MGMT_STATUS_BUSY, /* Role Switch Pending */
271 MGMT_STATUS_FAILED, /* Reserved for future use */
272 MGMT_STATUS_FAILED, /* Slot Violation */
273 MGMT_STATUS_FAILED, /* Role Switch Failed */
274 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
275 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
276 MGMT_STATUS_BUSY, /* Host Busy Pairing */
277 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
278 MGMT_STATUS_BUSY, /* Controller Busy */
279 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
280 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
281 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
282 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
283 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
284 };
285
286 static u8 mgmt_errno_status(int err)
287 {
288 switch (err) {
289 case 0:
290 return MGMT_STATUS_SUCCESS;
291 case -EPERM:
292 return MGMT_STATUS_REJECTED;
293 case -EINVAL:
294 return MGMT_STATUS_INVALID_PARAMS;
295 case -EOPNOTSUPP:
296 return MGMT_STATUS_NOT_SUPPORTED;
297 case -EBUSY:
298 return MGMT_STATUS_BUSY;
299 case -ETIMEDOUT:
300 return MGMT_STATUS_AUTH_FAILED;
301 case -ENOMEM:
302 return MGMT_STATUS_NO_RESOURCES;
303 case -EISCONN:
304 return MGMT_STATUS_ALREADY_CONNECTED;
305 case -ENOTCONN:
306 return MGMT_STATUS_DISCONNECTED;
307 }
308
309 return MGMT_STATUS_FAILED;
310 }
311
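/* Convert an error into a MGMT status code: negative values are
 * mapped as errnos, non-negative values are looked up in the HCI
 * status table above, and anything unmapped falls back to
 * MGMT_STATUS_FAILED.
 */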
312 static u8 mgmt_status(int err)
313 {
314 if (err < 0)
315 return mgmt_errno_status(err);
316
317 if (err < ARRAY_SIZE(mgmt_status_table))
318 return mgmt_status_table[err];
319
320 return MGMT_STATUS_FAILED;
321 }
322
323 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
324 u16 len, int flag)
325 {
326 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
327 flag, NULL);
328 }
329
330 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
331 u16 len, int flag, struct sock *skip_sk)
332 {
333 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
334 flag, skip_sk);
335 }
336
337 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
338 struct sock *skip_sk)
339 {
340 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
341 HCI_SOCK_TRUSTED, skip_sk);
342 }
343
344 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
345 {
346 return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
347 skip_sk);
348 }
349
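/* Map a MGMT address type to the HCI LE address type; anything
 * other than BDADDR_LE_PUBLIC is treated as a random address.
 */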
350 static u8 le_addr_type(u8 mgmt_addr_type)
351 {
352 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
353 return ADDR_LE_DEV_PUBLIC;
354 else
355 return ADDR_LE_DEV_RANDOM;
356 }
357
358 void mgmt_fill_version_info(void *ver)
359 {
360 struct mgmt_rp_read_version *rp = ver;
361
362 rp->version = MGMT_VERSION;
363 rp->revision = cpu_to_le16(MGMT_REVISION);
364 }
365
366 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
367 u16 data_len)
368 {
369 struct mgmt_rp_read_version rp;
370
371 bt_dev_dbg(hdev, "sock %p", sk);
372
373 mgmt_fill_version_info(&rp);
374
375 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
376 &rp, sizeof(rp));
377 }
378
379 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
380 u16 data_len)
381 {
382 struct mgmt_rp_read_commands *rp;
383 u16 num_commands, num_events;
384 size_t rp_size;
385 int i, err;
386
387 bt_dev_dbg(hdev, "sock %p", sk);
388
389 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
390 num_commands = ARRAY_SIZE(mgmt_commands);
391 num_events = ARRAY_SIZE(mgmt_events);
392 } else {
393 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
394 num_events = ARRAY_SIZE(mgmt_untrusted_events);
395 }
396
397 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
398
399 rp = kmalloc(rp_size, GFP_KERNEL);
400 if (!rp)
401 return -ENOMEM;
402
403 rp->num_commands = cpu_to_le16(num_commands);
404 rp->num_events = cpu_to_le16(num_events);
405
406 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
407 __le16 *opcode = rp->opcodes;
408
409 for (i = 0; i < num_commands; i++, opcode++)
410 put_unaligned_le16(mgmt_commands[i], opcode);
411
412 for (i = 0; i < num_events; i++, opcode++)
413 put_unaligned_le16(mgmt_events[i], opcode);
414 } else {
415 __le16 *opcode = rp->opcodes;
416
417 for (i = 0; i < num_commands; i++, opcode++)
418 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
419
420 for (i = 0; i < num_events; i++, opcode++)
421 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
422 }
423
424 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
425 rp, rp_size);
426 kfree(rp);
427
428 return err;
429 }
430
431 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
432 u16 data_len)
433 {
434 struct mgmt_rp_read_index_list *rp;
435 struct hci_dev *d;
436 size_t rp_len;
437 u16 count;
438 int err;
439
440 bt_dev_dbg(hdev, "sock %p", sk);
441
442 read_lock(&hci_dev_list_lock);
443
444 count = 0;
445 list_for_each_entry(d, &hci_dev_list, list) {
446 if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
447 count++;
448 }
449
450 rp_len = sizeof(*rp) + (2 * count);
451 rp = kmalloc(rp_len, GFP_ATOMIC);
452 if (!rp) {
453 read_unlock(&hci_dev_list_lock);
454 return -ENOMEM;
455 }
456
457 count = 0;
458 list_for_each_entry(d, &hci_dev_list, list) {
459 if (hci_dev_test_flag(d, HCI_SETUP) ||
460 hci_dev_test_flag(d, HCI_CONFIG) ||
461 hci_dev_test_flag(d, HCI_USER_CHANNEL))
462 continue;
463
464 /* Devices marked as raw-only are neither configured
465 * nor unconfigured controllers.
466 */
467 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
468 continue;
469
470 if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
471 rp->index[count++] = cpu_to_le16(d->id);
472 bt_dev_dbg(hdev, "Added hci%u", d->id);
473 }
474 }
475
476 rp->num_controllers = cpu_to_le16(count);
477 rp_len = sizeof(*rp) + (2 * count);
478
479 read_unlock(&hci_dev_list_lock);
480
481 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
482 0, rp, rp_len);
483
484 kfree(rp);
485
486 return err;
487 }
488
489 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
490 void *data, u16 data_len)
491 {
492 struct mgmt_rp_read_unconf_index_list *rp;
493 struct hci_dev *d;
494 size_t rp_len;
495 u16 count;
496 int err;
497
498 bt_dev_dbg(hdev, "sock %p", sk);
499
500 read_lock(&hci_dev_list_lock);
501
502 count = 0;
503 list_for_each_entry(d, &hci_dev_list, list) {
504 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
505 count++;
506 }
507
508 rp_len = sizeof(*rp) + (2 * count);
509 rp = kmalloc(rp_len, GFP_ATOMIC);
510 if (!rp) {
511 read_unlock(&hci_dev_list_lock);
512 return -ENOMEM;
513 }
514
515 count = 0;
516 list_for_each_entry(d, &hci_dev_list, list) {
517 if (hci_dev_test_flag(d, HCI_SETUP) ||
518 hci_dev_test_flag(d, HCI_CONFIG) ||
519 hci_dev_test_flag(d, HCI_USER_CHANNEL))
520 continue;
521
522 /* Devices marked as raw-only are neither configured
523 * nor unconfigured controllers.
524 */
525 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
526 continue;
527
528 if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
529 rp->index[count++] = cpu_to_le16(d->id);
530 bt_dev_dbg(hdev, "Added hci%u", d->id);
531 }
532 }
533
534 rp->num_controllers = cpu_to_le16(count);
535 rp_len = sizeof(*rp) + (2 * count);
536
537 read_unlock(&hci_dev_list_lock);
538
539 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
540 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
541
542 kfree(rp);
543
544 return err;
545 }
546
547 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
548 void *data, u16 data_len)
549 {
550 struct mgmt_rp_read_ext_index_list *rp;
551 struct hci_dev *d;
552 u16 count;
553 int err;
554
555 bt_dev_dbg(hdev, "sock %p", sk);
556
557 read_lock(&hci_dev_list_lock);
558
559 count = 0;
560 list_for_each_entry(d, &hci_dev_list, list)
561 count++;
562
563 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
564 if (!rp) {
565 read_unlock(&hci_dev_list_lock);
566 return -ENOMEM;
567 }
568
569 count = 0;
570 list_for_each_entry(d, &hci_dev_list, list) {
571 if (hci_dev_test_flag(d, HCI_SETUP) ||
572 hci_dev_test_flag(d, HCI_CONFIG) ||
573 hci_dev_test_flag(d, HCI_USER_CHANNEL))
574 continue;
575
576 /* Devices marked as raw-only are neither configured
577 * nor unconfigured controllers.
578 */
579 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
580 continue;
581
582 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
583 rp->entry[count].type = 0x01;
584 else
585 rp->entry[count].type = 0x00;
586
587 rp->entry[count].bus = d->bus;
588 rp->entry[count++].index = cpu_to_le16(d->id);
589 bt_dev_dbg(hdev, "Added hci%u", d->id);
590 }
591
592 rp->num_controllers = cpu_to_le16(count);
593
594 read_unlock(&hci_dev_list_lock);
595
596 /* If this command is called at least once, then all the
597 * default index and unconfigured index events are disabled
598 * and from now on only extended index events are used.
599 */
600 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
601 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
602 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
603
604 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
605 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
606 struct_size(rp, entry, count));
607
608 kfree(rp);
609
610 return err;
611 }
612
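/* A controller counts as configured once any required external
 * configuration has completed and, where the quirks require it,
 * a valid public address has been set.
 */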
613 static bool is_configured(struct hci_dev *hdev)
614 {
615 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
616 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
617 return false;
618
619 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
620 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
621 !bacmp(&hdev->public_addr, BDADDR_ANY))
622 return false;
623
624 return true;
625 }
626
627 static __le32 get_missing_options(struct hci_dev *hdev)
628 {
629 u32 options = 0;
630
631 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
632 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
633 options |= MGMT_OPTION_EXTERNAL_CONFIG;
634
635 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
636 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
637 !bacmp(&hdev->public_addr, BDADDR_ANY))
638 options |= MGMT_OPTION_PUBLIC_ADDRESS;
639
640 return cpu_to_le32(options);
641 }
642
643 static int new_options(struct hci_dev *hdev, struct sock *skip)
644 {
645 __le32 options = get_missing_options(hdev);
646
647 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
648 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
649 }
650
651 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
652 {
653 __le32 options = get_missing_options(hdev);
654
655 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
656 sizeof(options));
657 }
658
659 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
660 void *data, u16 data_len)
661 {
662 struct mgmt_rp_read_config_info rp;
663 u32 options = 0;
664
665 bt_dev_dbg(hdev, "sock %p", sk);
666
667 hci_dev_lock(hdev);
668
669 memset(&rp, 0, sizeof(rp));
670 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
671
672 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
673 options |= MGMT_OPTION_EXTERNAL_CONFIG;
674
675 if (hdev->set_bdaddr)
676 options |= MGMT_OPTION_PUBLIC_ADDRESS;
677
678 rp.supported_options = cpu_to_le32(options);
679 rp.missing_options = get_missing_options(hdev);
680
681 hci_dev_unlock(hdev);
682
683 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
684 &rp, sizeof(rp));
685 }
686
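/* Build the bitmask of PHYs the controller supports: the mandatory
 * BR 1M 1-slot PHY, the optional multi-slot and EDR variants based
 * on the LMP feature bits, and the LE 1M/2M/Coded PHYs based on
 * the LE feature bits.
 */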
687 static u32 get_supported_phys(struct hci_dev *hdev)
688 {
689 u32 supported_phys = 0;
690
691 if (lmp_bredr_capable(hdev)) {
692 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
693
694 if (hdev->features[0][0] & LMP_3SLOT)
695 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
696
697 if (hdev->features[0][0] & LMP_5SLOT)
698 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
699
700 if (lmp_edr_2m_capable(hdev)) {
701 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
702
703 if (lmp_edr_3slot_capable(hdev))
704 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
705
706 if (lmp_edr_5slot_capable(hdev))
707 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
708
709 if (lmp_edr_3m_capable(hdev)) {
710 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
711
712 if (lmp_edr_3slot_capable(hdev))
713 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
714
715 if (lmp_edr_5slot_capable(hdev))
716 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
717 }
718 }
719 }
720
721 if (lmp_le_capable(hdev)) {
722 supported_phys |= MGMT_PHY_LE_1M_TX;
723 supported_phys |= MGMT_PHY_LE_1M_RX;
724
725 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
726 supported_phys |= MGMT_PHY_LE_2M_TX;
727 supported_phys |= MGMT_PHY_LE_2M_RX;
728 }
729
730 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
731 supported_phys |= MGMT_PHY_LE_CODED_TX;
732 supported_phys |= MGMT_PHY_LE_CODED_RX;
733 }
734 }
735
736 return supported_phys;
737 }
738
739 static u32 get_selected_phys(struct hci_dev *hdev)
740 {
741 u32 selected_phys = 0;
742
743 if (lmp_bredr_capable(hdev)) {
744 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
745
746 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
747 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
748
749 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
750 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
751
752 if (lmp_edr_2m_capable(hdev)) {
753 if (!(hdev->pkt_type & HCI_2DH1))
754 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
755
756 if (lmp_edr_3slot_capable(hdev) &&
757 !(hdev->pkt_type & HCI_2DH3))
758 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
759
760 if (lmp_edr_5slot_capable(hdev) &&
761 !(hdev->pkt_type & HCI_2DH5))
762 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
763
764 if (lmp_edr_3m_capable(hdev)) {
765 if (!(hdev->pkt_type & HCI_3DH1))
766 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
767
768 if (lmp_edr_3slot_capable(hdev) &&
769 !(hdev->pkt_type & HCI_3DH3))
770 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
771
772 if (lmp_edr_5slot_capable(hdev) &&
773 !(hdev->pkt_type & HCI_3DH5))
774 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
775 }
776 }
777 }
778
779 if (lmp_le_capable(hdev)) {
780 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
781 selected_phys |= MGMT_PHY_LE_1M_TX;
782
783 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
784 selected_phys |= MGMT_PHY_LE_1M_RX;
785
786 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
787 selected_phys |= MGMT_PHY_LE_2M_TX;
788
789 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
790 selected_phys |= MGMT_PHY_LE_2M_RX;
791
792 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
793 selected_phys |= MGMT_PHY_LE_CODED_TX;
794
795 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
796 selected_phys |= MGMT_PHY_LE_CODED_RX;
797 }
798
799 return selected_phys;
800 }
801
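/* The mandatory PHYs (BR 1M 1-slot and LE 1M TX/RX) can never be
 * disabled, so they are masked out of the configurable set.
 */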
802 static u32 get_configurable_phys(struct hci_dev *hdev)
803 {
804 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
805 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
806 }
807
808 static u32 get_supported_settings(struct hci_dev *hdev)
809 {
810 u32 settings = 0;
811
812 settings |= MGMT_SETTING_POWERED;
813 settings |= MGMT_SETTING_BONDABLE;
814 settings |= MGMT_SETTING_DEBUG_KEYS;
815 settings |= MGMT_SETTING_CONNECTABLE;
816 settings |= MGMT_SETTING_DISCOVERABLE;
817
818 if (lmp_bredr_capable(hdev)) {
819 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
820 settings |= MGMT_SETTING_FAST_CONNECTABLE;
821 settings |= MGMT_SETTING_BREDR;
822 settings |= MGMT_SETTING_LINK_SECURITY;
823
824 if (lmp_ssp_capable(hdev)) {
825 settings |= MGMT_SETTING_SSP;
826 }
827
828 if (lmp_sc_capable(hdev))
829 settings |= MGMT_SETTING_SECURE_CONN;
830
831 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
832 &hdev->quirks))
833 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
834 }
835
836 if (lmp_le_capable(hdev)) {
837 settings |= MGMT_SETTING_LE;
838 settings |= MGMT_SETTING_SECURE_CONN;
839 settings |= MGMT_SETTING_PRIVACY;
840 settings |= MGMT_SETTING_STATIC_ADDRESS;
841 settings |= MGMT_SETTING_ADVERTISING;
842 }
843
844 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
845 hdev->set_bdaddr)
846 settings |= MGMT_SETTING_CONFIGURATION;
847
848 if (cis_central_capable(hdev))
849 settings |= MGMT_SETTING_CIS_CENTRAL;
850
851 if (cis_peripheral_capable(hdev))
852 settings |= MGMT_SETTING_CIS_PERIPHERAL;
853
854 settings |= MGMT_SETTING_PHY_CONFIGURATION;
855
856 return settings;
857 }
858
859 static u32 get_current_settings(struct hci_dev *hdev)
860 {
861 u32 settings = 0;
862
863 if (hdev_is_powered(hdev))
864 settings |= MGMT_SETTING_POWERED;
865
866 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
867 settings |= MGMT_SETTING_CONNECTABLE;
868
869 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
870 settings |= MGMT_SETTING_FAST_CONNECTABLE;
871
872 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
873 settings |= MGMT_SETTING_DISCOVERABLE;
874
875 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
876 settings |= MGMT_SETTING_BONDABLE;
877
878 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
879 settings |= MGMT_SETTING_BREDR;
880
881 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
882 settings |= MGMT_SETTING_LE;
883
884 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
885 settings |= MGMT_SETTING_LINK_SECURITY;
886
887 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
888 settings |= MGMT_SETTING_SSP;
889
890 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
891 settings |= MGMT_SETTING_ADVERTISING;
892
893 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
894 settings |= MGMT_SETTING_SECURE_CONN;
895
896 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
897 settings |= MGMT_SETTING_DEBUG_KEYS;
898
899 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
900 settings |= MGMT_SETTING_PRIVACY;
901
902 /* The current setting for static address has two purposes. The
903 * first is to indicate if the static address will be used and
904 * the second is to indicate if it is actually set.
905 *
906 * This means if the static address is not configured, this flag
907 * will never be set. If the address is configured, then whether
908 * the address is actually in use decides if the flag is set or not.
909 *
910 * For single mode LE only controllers and dual-mode controllers
911 * with BR/EDR disabled, the existence of the static address will
912 * be evaluated.
913 */
914 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
915 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
916 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
917 if (bacmp(&hdev->static_addr, BDADDR_ANY))
918 settings |= MGMT_SETTING_STATIC_ADDRESS;
919 }
920
921 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
922 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
923
924 if (cis_central_capable(hdev))
925 settings |= MGMT_SETTING_CIS_CENTRAL;
926
927 if (cis_peripheral_capable(hdev))
928 settings |= MGMT_SETTING_CIS_PERIPHERAL;
929
930 if (bis_capable(hdev))
931 settings |= MGMT_SETTING_ISO_BROADCASTER;
932
933 if (sync_recv_capable(hdev))
934 settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;
935
936 return settings;
937 }
938
939 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
940 {
941 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
942 }
943
944 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
945 {
946 struct mgmt_pending_cmd *cmd;
947
948 /* If there's a pending mgmt command the flags will not yet have
949 * their final values, so check for this first.
950 */
951 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
952 if (cmd) {
953 struct mgmt_mode *cp = cmd->param;
954 if (cp->val == 0x01)
955 return LE_AD_GENERAL;
956 else if (cp->val == 0x02)
957 return LE_AD_LIMITED;
958 } else {
959 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
960 return LE_AD_LIMITED;
961 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
962 return LE_AD_GENERAL;
963 }
964
965 return 0;
966 }
967
968 bool mgmt_get_connectable(struct hci_dev *hdev)
969 {
970 struct mgmt_pending_cmd *cmd;
971
972 /* If there's a pending mgmt command the flag will not yet have
973 * its final value, so check for this first.
974 */
975 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
976 if (cmd) {
977 struct mgmt_mode *cp = cmd->param;
978
979 return cp->val;
980 }
981
982 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
983 }
984
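/* Flush the service cache by regenerating the EIR data and the
 * class of device once the cache timeout has fired.
 */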
985 static int service_cache_sync(struct hci_dev *hdev, void *data)
986 {
987 hci_update_eir_sync(hdev);
988 hci_update_class_sync(hdev);
989
990 return 0;
991 }
992
993 static void service_cache_off(struct work_struct *work)
994 {
995 struct hci_dev *hdev = container_of(work, struct hci_dev,
996 service_cache.work);
997
998 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
999 return;
1000
1001 hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
1002 }
1003
1004 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1005 {
1006 /* The generation of a new RPA and programming it into the
1007 * controller happens in the hci_req_enable_advertising()
1008 * function.
1009 */
1010 if (ext_adv_capable(hdev))
1011 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1012 else
1013 return hci_enable_advertising_sync(hdev);
1014 }
1015
1016 static void rpa_expired(struct work_struct *work)
1017 {
1018 struct hci_dev *hdev = container_of(work, struct hci_dev,
1019 rpa_expired.work);
1020
1021 bt_dev_dbg(hdev, "");
1022
1023 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1024
1025 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1026 return;
1027
1028 hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
1029 }
1030
1031 static int set_discoverable_sync(struct hci_dev *hdev, void *data);
1032
1033 static void discov_off(struct work_struct *work)
1034 {
1035 struct hci_dev *hdev = container_of(work, struct hci_dev,
1036 discov_off.work);
1037
1038 bt_dev_dbg(hdev, "");
1039
1040 hci_dev_lock(hdev);
1041
1042 /* When the discoverable timeout triggers, just make sure
1043 * the limited discoverable flag is cleared. Even in the case
1044 * of a timeout triggered from general discoverable, it is
1045 * safe to unconditionally clear the flag.
1046 */
1047 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1048 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1049 hdev->discov_timeout = 0;
1050
1051 hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);
1052
1053 mgmt_new_settings(hdev);
1054
1055 hci_dev_unlock(hdev);
1056 }
1057
1058 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1059
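/* Complete a mesh transmission: notify user-space with a Mesh
 * Packet Complete event (unless silent) and remove the TX entry.
 */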
1060 static void mesh_send_complete(struct hci_dev *hdev,
1061 struct mgmt_mesh_tx *mesh_tx, bool silent)
1062 {
1063 u8 handle = mesh_tx->handle;
1064
1065 if (!silent)
1066 mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
1067 sizeof(handle), NULL);
1068
1069 mgmt_mesh_remove(mesh_tx);
1070 }
1071
1072 static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
1073 {
1074 struct mgmt_mesh_tx *mesh_tx;
1075
1076 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
1077 hci_disable_advertising_sync(hdev);
1078 mesh_tx = mgmt_mesh_next(hdev, NULL);
1079
1080 if (mesh_tx)
1081 mesh_send_complete(hdev, mesh_tx, false);
1082
1083 return 0;
1084 }
1085
1086 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1087 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
1088 static void mesh_next(struct hci_dev *hdev, void *data, int err)
1089 {
1090 struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);
1091
1092 if (!mesh_tx)
1093 return;
1094
1095 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
1096 mesh_send_start_complete);
1097
1098 if (err < 0)
1099 mesh_send_complete(hdev, mesh_tx, false);
1100 else
1101 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
1102 }
1103
1104 static void mesh_send_done(struct work_struct *work)
1105 {
1106 struct hci_dev *hdev = container_of(work, struct hci_dev,
1107 mesh_send_done.work);
1108
1109 if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
1110 return;
1111
1112 hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
1113 }
1114
1115 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1116 {
1117 if (hci_dev_test_flag(hdev, HCI_MGMT))
1118 return;
1119
1120 BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);
1121
1122 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
1123 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1124 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1125 INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);
1126
1127 /* Non-mgmt controlled devices get this bit set
1128 * implicitly so that pairing works for them. For mgmt,
1129 * however, we require user-space to explicitly enable
1130 * it.
1131 */
1132 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1133
1134 hci_dev_set_flag(hdev, HCI_MGMT);
1135 }
1136
1137 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1138 void *data, u16 data_len)
1139 {
1140 struct mgmt_rp_read_info rp;
1141
1142 bt_dev_dbg(hdev, "sock %p", sk);
1143
1144 hci_dev_lock(hdev);
1145
1146 memset(&rp, 0, sizeof(rp));
1147
1148 bacpy(&rp.bdaddr, &hdev->bdaddr);
1149
1150 rp.version = hdev->hci_ver;
1151 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1152
1153 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1154 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1155
1156 memcpy(rp.dev_class, hdev->dev_class, 3);
1157
1158 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1159 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1160
1161 hci_dev_unlock(hdev);
1162
1163 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1164 sizeof(rp));
1165 }
1166
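/* Assemble the EIR payload used by the extended controller info
 * reply and event: class of device (BR/EDR only), appearance (LE
 * only) and the complete and short local names.
 */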
1167 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1168 {
1169 u16 eir_len = 0;
1170 size_t name_len;
1171
1172 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1173 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1174 hdev->dev_class, 3);
1175
1176 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1177 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1178 hdev->appearance);
1179
1180 name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
1181 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1182 hdev->dev_name, name_len);
1183
1184 name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
1185 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1186 hdev->short_name, name_len);
1187
1188 return eir_len;
1189 }
1190
1191 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1192 void *data, u16 data_len)
1193 {
1194 char buf[512];
1195 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1196 u16 eir_len;
1197
1198 bt_dev_dbg(hdev, "sock %p", sk);
1199
1200 memset(&buf, 0, sizeof(buf));
1201
1202 hci_dev_lock(hdev);
1203
1204 bacpy(&rp->bdaddr, &hdev->bdaddr);
1205
1206 rp->version = hdev->hci_ver;
1207 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1208
1209 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1210 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1211
1212
1213 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1214 rp->eir_len = cpu_to_le16(eir_len);
1215
1216 hci_dev_unlock(hdev);
1217
1218 /* If this command is called at least once, then the events
1219 * for class of device and local name changes are disabled
1220 * and only the new extended controller information event
1221 * is used.
1222 */
1223 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1224 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1225 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1226
1227 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1228 sizeof(*rp) + eir_len);
1229 }
1230
1231 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1232 {
1233 char buf[512];
1234 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1235 u16 eir_len;
1236
1237 memset(buf, 0, sizeof(buf));
1238
1239 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1240 ev->eir_len = cpu_to_le16(eir_len);
1241
1242 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1243 sizeof(*ev) + eir_len,
1244 HCI_MGMT_EXT_INFO_EVENTS, skip);
1245 }
1246
1247 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1248 {
1249 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1250
1251 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1252 sizeof(settings));
1253 }
1254
1255 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1256 {
1257 struct mgmt_ev_advertising_added ev;
1258
1259 ev.instance = instance;
1260
1261 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1262 }
1263
1264 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1265 u8 instance)
1266 {
1267 struct mgmt_ev_advertising_removed ev;
1268
1269 ev.instance = instance;
1270
1271 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1272 }
1273
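/* Cancel a pending advertising instance timeout, if one is armed,
 * without removing the instance itself.
 */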
1274 static void cancel_adv_timeout(struct hci_dev *hdev)
1275 {
1276 if (hdev->adv_instance_timeout) {
1277 hdev->adv_instance_timeout = 0;
1278 cancel_delayed_work(&hdev->adv_instance_expire);
1279 }
1280 }
1281
1282 /* This function requires the caller holds hdev->lock */
1283 static void restart_le_actions(struct hci_dev *hdev)
1284 {
1285 struct hci_conn_params *p;
1286
1287 list_for_each_entry(p, &hdev->le_conn_params, list) {
1288 /* Needed for AUTO_OFF case where the device might not
1289 * "really" have been powered off.
1290 */
1291 hci_pend_le_list_del_init(p);
1292
1293 switch (p->auto_connect) {
1294 case HCI_AUTO_CONN_DIRECT:
1295 case HCI_AUTO_CONN_ALWAYS:
1296 hci_pend_le_list_add(p, &hdev->pend_le_conns);
1297 break;
1298 case HCI_AUTO_CONN_REPORT:
1299 hci_pend_le_list_add(p, &hdev->pend_le_reports);
1300 break;
1301 default:
1302 break;
1303 }
1304 }
1305 }
1306
1307 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1308 {
1309 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1310
1311 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1312 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1313 }
1314
1315 static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
1316 {
1317 struct mgmt_pending_cmd *cmd = data;
1318 struct mgmt_mode *cp;
1319
1320 /* Make sure cmd still outstanding. */
1321 if (err == -ECANCELED ||
1322 cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
1323 return;
1324
1325 cp = cmd->param;
1326
1327 bt_dev_dbg(hdev, "err %d", err);
1328
1329 if (!err) {
1330 if (cp->val) {
1331 hci_dev_lock(hdev);
1332 restart_le_actions(hdev);
1333 hci_update_passive_scan(hdev);
1334 hci_dev_unlock(hdev);
1335 }
1336
1337 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
1338
1339 /* Only call new_setting for power on as power off is deferred
1340 * to hdev->power_off work which does call hci_dev_do_close.
1341 */
1342 if (cp->val)
1343 new_settings(hdev, cmd->sk);
1344 } else {
1345 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
1346 mgmt_status(err));
1347 }
1348
1349 mgmt_pending_remove(cmd);
1350 }
1351
1352 static int set_powered_sync(struct hci_dev *hdev, void *data)
1353 {
1354 struct mgmt_pending_cmd *cmd = data;
1355 struct mgmt_mode *cp;
1356
1357 /* Make sure cmd still outstanding. */
1358 if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
1359 return -ECANCELED;
1360
1361 cp = cmd->param;
1362
1363 BT_DBG("%s", hdev->name);
1364
1365 return hci_set_powered_sync(hdev, cp->val);
1366 }
1367
1368 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1369 u16 len)
1370 {
1371 struct mgmt_mode *cp = data;
1372 struct mgmt_pending_cmd *cmd;
1373 int err;
1374
1375 bt_dev_dbg(hdev, "sock %p", sk);
1376
1377 if (cp->val != 0x00 && cp->val != 0x01)
1378 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1379 MGMT_STATUS_INVALID_PARAMS);
1380
1381 hci_dev_lock(hdev);
1382
1383 if (!cp->val) {
1384 if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
1385 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1386 MGMT_STATUS_BUSY);
1387 goto failed;
1388 }
1389 }
1390
1391 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1392 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1393 MGMT_STATUS_BUSY);
1394 goto failed;
1395 }
1396
1397 if (!!cp->val == hdev_is_powered(hdev)) {
1398 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1399 goto failed;
1400 }
1401
1402 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1403 if (!cmd) {
1404 err = -ENOMEM;
1405 goto failed;
1406 }
1407
1408 /* Cancel potentially blocking sync operation before power off */
1409 if (cp->val == 0x00) {
1410 hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
1411 err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
1412 mgmt_set_powered_complete);
1413 } else {
1414 /* Use hci_cmd_sync_submit since hdev might not be running */
1415 err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
1416 mgmt_set_powered_complete);
1417 }
1418
1419 if (err < 0)
1420 mgmt_pending_remove(cmd);
1421
1422 failed:
1423 hci_dev_unlock(hdev);
1424 return err;
1425 }
1426
1427 int mgmt_new_settings(struct hci_dev *hdev)
1428 {
1429 return new_settings(hdev, NULL);
1430 }
1431
1432 struct cmd_lookup {
1433 struct sock *sk;
1434 struct hci_dev *hdev;
1435 u8 mgmt_status;
1436 };
1437
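/* Answer a pending command with the current settings and remember
 * the first originating socket so it can be skipped when the
 * resulting New Settings event is broadcast.
 */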
1438 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1439 {
1440 struct cmd_lookup *match = data;
1441
1442 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1443
1444 list_del(&cmd->list);
1445
1446 if (match->sk == NULL) {
1447 match->sk = cmd->sk;
1448 sock_hold(match->sk);
1449 }
1450
1451 mgmt_pending_free(cmd);
1452 }
1453
1454 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1455 {
1456 u8 *status = data;
1457
1458 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1459 mgmt_pending_remove(cmd);
1460 }
1461
1462 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1463 {
1464 struct cmd_lookup *match = data;
1465
1466 /* dequeue cmd_sync entries using cmd as data as that is about to be
1467 * removed/freed.
1468 */
1469 hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);
1470
1471 if (cmd->cmd_complete) {
1472 cmd->cmd_complete(cmd, match->mgmt_status);
1473 mgmt_pending_remove(cmd);
1474
1475 return;
1476 }
1477
1478 cmd_status_rsp(cmd, data);
1479 }
1480
1481 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1482 {
1483 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1484 cmd->param, cmd->param_len);
1485 }
1486
1487 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1488 {
1489 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1490 cmd->param, sizeof(struct mgmt_addr_info));
1491 }
1492
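/* BR/EDR dependent commands are not supported when the controller
 * lacks BR/EDR and rejected when BR/EDR has been disabled.
 */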
1493 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1494 {
1495 if (!lmp_bredr_capable(hdev))
1496 return MGMT_STATUS_NOT_SUPPORTED;
1497 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1498 return MGMT_STATUS_REJECTED;
1499 else
1500 return MGMT_STATUS_SUCCESS;
1501 }
1502
1503 static u8 mgmt_le_support(struct hci_dev *hdev)
1504 {
1505 if (!lmp_le_capable(hdev))
1506 return MGMT_STATUS_NOT_SUPPORTED;
1507 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1508 return MGMT_STATUS_REJECTED;
1509 else
1510 return MGMT_STATUS_SUCCESS;
1511 }
1512
1513 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1514 int err)
1515 {
1516 struct mgmt_pending_cmd *cmd = data;
1517
1518 bt_dev_dbg(hdev, "err %d", err);
1519
1520 /* Make sure cmd still outstanding. */
1521 if (err == -ECANCELED ||
1522 cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1523 return;
1524
1525 hci_dev_lock(hdev);
1526
1527 if (err) {
1528 u8 mgmt_err = mgmt_status(err);
1529 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1530 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1531 goto done;
1532 }
1533
1534 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1535 hdev->discov_timeout > 0) {
1536 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1537 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1538 }
1539
1540 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1541 new_settings(hdev, cmd->sk);
1542
1543 done:
1544 mgmt_pending_remove(cmd);
1545 hci_dev_unlock(hdev);
1546 }
1547
1548 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1549 {
1550 BT_DBG("%s", hdev->name);
1551
1552 return hci_update_discoverable_sync(hdev);
1553 }
1554
1555 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1556 u16 len)
1557 {
1558 struct mgmt_cp_set_discoverable *cp = data;
1559 struct mgmt_pending_cmd *cmd;
1560 u16 timeout;
1561 int err;
1562
1563 bt_dev_dbg(hdev, "sock %p", sk);
1564
1565 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1566 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1567 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1568 MGMT_STATUS_REJECTED);
1569
1570 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1571 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1572 MGMT_STATUS_INVALID_PARAMS);
1573
1574 timeout = __le16_to_cpu(cp->timeout);
1575
1576 /* Disabling discoverable requires that no timeout is set,
1577 * and enabling limited discoverable requires a timeout.
1578 */
1579 if ((cp->val == 0x00 && timeout > 0) ||
1580 (cp->val == 0x02 && timeout == 0))
1581 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1582 MGMT_STATUS_INVALID_PARAMS);
1583
1584 hci_dev_lock(hdev);
1585
1586 if (!hdev_is_powered(hdev) && timeout > 0) {
1587 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1588 MGMT_STATUS_NOT_POWERED);
1589 goto failed;
1590 }
1591
1592 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1593 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1594 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1595 MGMT_STATUS_BUSY);
1596 goto failed;
1597 }
1598
1599 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1600 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1601 MGMT_STATUS_REJECTED);
1602 goto failed;
1603 }
1604
1605 if (hdev->advertising_paused) {
1606 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1607 MGMT_STATUS_BUSY);
1608 goto failed;
1609 }
1610
1611 if (!hdev_is_powered(hdev)) {
1612 bool changed = false;
1613
1614 /* Setting limited discoverable when powered off is
1615 * not a valid operation since it requires a timeout
1616 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1617 */
1618 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1619 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1620 changed = true;
1621 }
1622
1623 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1624 if (err < 0)
1625 goto failed;
1626
1627 if (changed)
1628 err = new_settings(hdev, sk);
1629
1630 goto failed;
1631 }
1632
1633 /* If the current mode is the same, then just update the timeout
1634 * value with the new value. And if only the timeout gets updated,
1635 * then there is no need for any HCI transactions.
1636 */
1637 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1638 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1639 HCI_LIMITED_DISCOVERABLE)) {
1640 cancel_delayed_work(&hdev->discov_off);
1641 hdev->discov_timeout = timeout;
1642
1643 if (cp->val && hdev->discov_timeout > 0) {
1644 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1645 queue_delayed_work(hdev->req_workqueue,
1646 &hdev->discov_off, to);
1647 }
1648
1649 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1650 goto failed;
1651 }
1652
1653 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1654 if (!cmd) {
1655 err = -ENOMEM;
1656 goto failed;
1657 }
1658
1659 /* Cancel any potential discoverable timeout that might be
1660 * still active and store new timeout value. The arming of
1661 * the timeout happens in the complete handler.
1662 */
1663 cancel_delayed_work(&hdev->discov_off);
1664 hdev->discov_timeout = timeout;
1665
1666 if (cp->val)
1667 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1668 else
1669 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1670
1671 /* Limited discoverable mode */
1672 if (cp->val == 0x02)
1673 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1674 else
1675 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1676
1677 err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1678 mgmt_set_discoverable_complete);
1679
1680 if (err < 0)
1681 mgmt_pending_remove(cmd);
1682
1683 failed:
1684 hci_dev_unlock(hdev);
1685 return err;
1686 }
1687
1688 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1689 int err)
1690 {
1691 struct mgmt_pending_cmd *cmd = data;
1692
1693 bt_dev_dbg(hdev, "err %d", err);
1694
1695 /* Make sure cmd still outstanding. */
1696 if (err == -ECANCELED ||
1697 cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1698 return;
1699
1700 hci_dev_lock(hdev);
1701
1702 if (err) {
1703 u8 mgmt_err = mgmt_status(err);
1704 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1705 goto done;
1706 }
1707
1708 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1709 new_settings(hdev, cmd->sk);
1710
1711 done:
1712 mgmt_pending_remove(cmd);
1713
1714 hci_dev_unlock(hdev);
1715 }
1716
1717 static int set_connectable_update_settings(struct hci_dev *hdev,
1718 struct sock *sk, u8 val)
1719 {
1720 bool changed = false;
1721 int err;
1722
1723 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1724 changed = true;
1725
1726 if (val) {
1727 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1728 } else {
1729 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1730 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1731 }
1732
1733 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1734 if (err < 0)
1735 return err;
1736
1737 if (changed) {
1738 hci_update_scan(hdev);
1739 hci_update_passive_scan(hdev);
1740 return new_settings(hdev, sk);
1741 }
1742
1743 return 0;
1744 }
1745
1746 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1747 {
1748 BT_DBG("%s", hdev->name);
1749
1750 return hci_update_connectable_sync(hdev);
1751 }
1752
1753 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1754 u16 len)
1755 {
1756 struct mgmt_mode *cp = data;
1757 struct mgmt_pending_cmd *cmd;
1758 int err;
1759
1760 bt_dev_dbg(hdev, "sock %p", sk);
1761
1762 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1763 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1764 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1765 MGMT_STATUS_REJECTED);
1766
1767 if (cp->val != 0x00 && cp->val != 0x01)
1768 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1769 MGMT_STATUS_INVALID_PARAMS);
1770
1771 hci_dev_lock(hdev);
1772
1773 if (!hdev_is_powered(hdev)) {
1774 err = set_connectable_update_settings(hdev, sk, cp->val);
1775 goto failed;
1776 }
1777
1778 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1779 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1780 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1781 MGMT_STATUS_BUSY);
1782 goto failed;
1783 }
1784
1785 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1786 if (!cmd) {
1787 err = -ENOMEM;
1788 goto failed;
1789 }
1790
1791 if (cp->val) {
1792 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1793 } else {
1794 if (hdev->discov_timeout > 0)
1795 cancel_delayed_work(&hdev->discov_off);
1796
1797 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1798 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1799 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1800 }
1801
1802 err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1803 mgmt_set_connectable_complete);
1804
1805 if (err < 0)
1806 mgmt_pending_remove(cmd);
1807
1808 failed:
1809 hci_dev_unlock(hdev);
1810 return err;
1811 }
1812
1813 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1814 u16 len)
1815 {
1816 struct mgmt_mode *cp = data;
1817 bool changed;
1818 int err;
1819
1820 bt_dev_dbg(hdev, "sock %p", sk);
1821
1822 if (cp->val != 0x00 && cp->val != 0x01)
1823 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1824 MGMT_STATUS_INVALID_PARAMS);
1825
1826 hci_dev_lock(hdev);
1827
1828 if (cp->val)
1829 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1830 else
1831 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1832
1833 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1834 if (err < 0)
1835 goto unlock;
1836
1837 if (changed) {
1838 /* In limited privacy mode the change of bondable mode
1839 * may affect the local advertising address.
1840 */
1841 hci_update_discoverable(hdev);
1842
1843 err = new_settings(hdev, sk);
1844 }
1845
1846 unlock:
1847 hci_dev_unlock(hdev);
1848 return err;
1849 }
1850
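/* MGMT_OP_SET_LINK_SECURITY handler. Note that this path still uses
 * hci_send_cmd() directly instead of the hci_sync machinery, so the
 * pending command is completed from the HCI_OP_WRITE_AUTH_ENABLE
 * command complete handling.
 */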
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

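/* Completion handler for MGMT_OP_SET_SSP. On failure, the
 * HCI_SSP_ENABLED flag that set_ssp_sync() set optimistically is
 * cleared again and all pending SET_SSP commands receive the error
 * status.
 */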
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}

static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}

static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	bt_dev_dbg(hdev, "sock %p", sk);

	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
			       MGMT_STATUS_NOT_SUPPORTED);
}

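/* Completion handler for MGMT_OP_SET_LE: respond to every pending
 * SET_LE command and, on success, emit New Settings.
 */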
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}

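/* Mesh Receiver support. The handlers below are only usable once the
 * HCI_MESH_EXPERIMENTAL flag has been enabled for the controller.
 */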
static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	u8 status = mgmt_status(err);
	struct sock *sk = cmd->sk;

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	mgmt_pending_remove(cmd);
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
}

static int set_mesh_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_mesh *cp = cmd->param;
	size_t len = cmd->param_len;

	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));

	if (cp->enable)
		hci_dev_set_flag(hdev, HCI_MESH);
	else
		hci_dev_clear_flag(hdev, HCI_MESH);

	len -= sizeof(*cp);

	/* Copy the AD type filters if they fit; otherwise leave the list
	 * empty so that all adv packets are forwarded.
	 */
	if (len <= sizeof(hdev->mesh_ad_types))
		memcpy(hdev->mesh_ad_types, cp->ad_types, len);

	hci_update_passive_scan_sync(hdev);
	return 0;
}

static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->enable != 0x00 && cp->enable != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}

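/* Run once the advertising instance for a mesh transmission has been
 * queued. On success, completion of the transmission is handled by the
 * mesh_send_done work, scheduled here after cnt * 25 ms.
 */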
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	unsigned long mesh_send_interval;
	u8 mgmt_err = mgmt_status(err);

	/* Report any errors here, but don't report completion */

	if (mgmt_err) {
		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
		/* Send Complete Error Code for handle */
		mesh_send_complete(hdev, mesh_tx, false);
		return;
	}

	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
			   mesh_send_interval);
}

static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	timeout = 1000;
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other is
		 * active, or let it go out naturally from the queue if
		 * advertising is already in progress.
		 */
		instance = 0;
	}

	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}

static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
{
	struct mgmt_rp_mesh_read_features *rp = data;

	if (rp->used_handles >= rp->max_handles)
		return;

	rp->handles[rp->used_handles++] = mesh_tx->handle;
}

static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	memset(&rp, 0, sizeof(rp));
	rp.index = cpu_to_le16(hdev->id);
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}

static int send_cancel(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
	struct mgmt_mesh_tx *mesh_tx;

	if (!cancel->handle) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, false);
		} while (mesh_tx);
	} else {
		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);

		if (mesh_tx && mesh_tx->sk == cmd->sk)
			mesh_send_complete(hdev, mesh_tx, false);
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
			  0, NULL, 0);
	mgmt_pending_free(cmd);

	return 0;
}

static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);
	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}

static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct mgmt_cp_mesh_send *send = data;
	struct mgmt_rp_mesh_read_features rp;
	bool sending;
	int err = 0;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_NOT_SUPPORTED);
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    len <= MGMT_MESH_SEND_SIZE ||
	    len > (MGMT_MESH_SEND_SIZE + 31))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.max_handles = MESH_HANDLES_MAX;

	mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	if (rp.max_handles <= rp.used_handles) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_BUSY);
		goto done;
	}

	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);

	if (!mesh_tx)
		err = -ENOMEM;
	else if (!sending)
		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
					 mesh_send_start_complete);

	if (err < 0) {
		bt_dev_err(hdev, "Send Mesh Failed %d", err);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_FAILED);

		if (mesh_tx) {
			if (sending)
				mgmt_mesh_remove(mesh_tx);
		}
	} else {
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);

		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
				  &mesh_tx->handle, 1);
	}

done:
	hci_dev_unlock(hdev);
	return err;
}

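/* MGMT_OP_SET_LE handler. See the comment below for why LE-only
 * controllers reject switching LE off.
 */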
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Single-mode LE-only controllers, and dual-mode controllers
	 * configured as LE-only devices, do not allow switching LE off.
	 * These either have LE enabled explicitly or have had BR/EDR
	 * switched off previously.
	 *
	 * Trying to enable LE when it is already enabled gracefully
	 * sends a positive response; trying to disable it, however,
	 * results in rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

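/* MGMT_OP_HCI_CMD_SYNC lets userspace issue a raw HCI command and wait
 * for the matching event. A timeout of zero selects the default
 * HCI_CMD_TIMEOUT.
 */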
static int send_hci_cmd_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_hci_cmd_sync *cp = cmd->param;
	struct sk_buff *skb;

	skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cp->opcode),
				le16_to_cpu(cp->params_len), cp->params,
				cp->event, cp->timeout ?
				msecs_to_jiffies(cp->timeout * 1000) :
				HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
				mgmt_status(PTR_ERR(skb)));
		goto done;
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC, 0,
			  skb->data, skb->len);

	kfree_skb(skb);

done:
	mgmt_pending_free(cmd);

	return 0;
}

static int mgmt_hci_cmd_sync(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 len)
{
	struct mgmt_cp_hci_cmd_sync *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	if (len < sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);
	cmd = mgmt_pending_new(sk, MGMT_OP_HCI_CMD_SYNC, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_hci_cmd_sync, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}

/* Helper to test for pending mgmt commands that can result in CoD or
 * EIR HCI commands. Only one such mgmt command may be pending at a
 * time, since otherwise we cannot easily track what the current and
 * future values will be, nor calculate whether a new HCI command needs
 * to be sent and, if so, with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}

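/* The Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order. get_uuid_size() below compares against it
 * to decide whether a 128-bit UUID can be represented by its 16- or
 * 32-bit alias.
 */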
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

static u8 get_uuid_size(const u8 *uuid)
{
	u32 val;

	if (memcmp(uuid, bluetooth_base_uuid, 12))
		return 128;

	val = get_unaligned_le32(&uuid[12]);
	if (val > 0xffff)
		return 32;

	return 16;
}

static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(err), hdev->dev_class, 3);

	mgmt_pending_free(cmd);
}

static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_update_class_sync(hdev);
	if (err)
		return err;

	return hci_update_eir_sync(hdev);
}

static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* MGMT_OP_ADD_UUID doesn't require the adapter to be up and
	 * running, so use hci_cmd_sync_submit instead of
	 * hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

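/* Arm the service cache timer after all UUIDs have been removed.
 * Returns true if the timer was armed, in which case the class and
 * EIR updates are deferred until it fires.
 */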
static bool enable_service_cache(struct hci_dev *hdev)
{
	if (!hdev_is_powered(hdev))
		return false;

	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
				   CACHE_TIMEOUT);
		return true;
	}

	return false;
}

static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_update_class_sync(hdev);
	if (err)
		return err;

	return hci_update_eir_sync(hdev);
}

static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_REMOVE_UUID doesn't require the adapter to be up and
	 * running, so use hci_cmd_sync_submit instead of
	 * hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_class_sync(struct hci_dev *hdev, void *data)
{
	int err = 0;

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		cancel_delayed_work_sync(&hdev->service_cache);
		err = hci_update_eir_sync(hdev);
	}

	if (err)
		return err;

	return hci_update_class_sync(hdev);
}

static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_SET_DEV_CLASS doesn't require the adapter to be up and
	 * running, so use hci_cmd_sync_submit instead of
	 * hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

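/* MGMT_OP_LOAD_LINK_KEYS replaces the whole BR/EDR link key list;
 * blocked, malformed and debug combination keys are skipped rather
 * than failing the entire load.
 */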
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		if (key->addr.type != BDADDR_BREDR) {
			bt_dev_warn(hdev,
				    "Invalid link address type %u for %pMR",
				    key->addr.type, &key->addr.bdaddr);
			continue;
		}

		if (key->type > 0x08) {
			bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
				    key->type, &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}

static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}

static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	if (!err)
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, err);
	mgmt_pending_free(cmd);
}

static int unpair_device_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct hci_conn *conn;

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn)
		return 0;

	/* Disregard any possible error since the likes of hci_abort_conn_sync
	 * will clean up the connection no matter the error.
	 */
	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

	return 0;
}

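/* MGMT_OP_UNPAIR_DEVICE handler. For BR/EDR only the link key is
 * removed; for LE any ongoing SMP pairing is cancelled and the LTK
 * and IRK removed. If disconnection was requested, the link
 * termination is queued and the command completes from
 * unpair_device_complete().
 */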
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}

	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a re-pairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	cmd->cmd_complete(cmd, mgmt_status(err));
	mgmt_pending_free(cmd);
}

static int disconnect_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_disconnect *cp = cmd->param;
	struct hci_conn *conn;

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn)
		return -ENOTCONN;

	/* Disregard any possible error since the likes of hci_abort_conn_sync
	 * will clean up the connection no matter the error.
	 */
	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

	return 0;
}

static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
				 disconnect_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

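/* Map an HCI link type plus address type to the BDADDR_* address type
 * used in the mgmt interface.
 */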
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	switch (link_type) {
	case ISO_LINK:
	case LE_LINK:
		switch (addr_type) {
		case ADDR_LE_DEV_PUBLIC:
			return BDADDR_LE_PUBLIC;

		default:
			/* Fallback to LE Random address type */
			return BDADDR_LE_RANDOM;
		}

	default:
		/* Fallback to BR/EDR type */
		return BDADDR_BREDR;
	}
}

static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}

static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}

static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
			continue;

		if (cmd->user_data != conn)
			continue;

		return cmd;
	}

	return NULL;
}

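/* Finish a Pair Device command: send the response, detach the
 * connection callbacks and drop the references taken while the
 * pairing was in progress.
 */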
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}

void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
	struct mgmt_pending_cmd *cmd;

	cmd = find_pairing(conn);
	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

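/* MGMT_OP_PAIR_DEVICE handler. BR/EDR pairing creates an ACL
 * connection with dedicated bonding, while LE pairing connects via
 * hci_connect_le_scan() and lets SMP drive the actual pairing once
 * the link is established.
 */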
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE,
				       HCI_ACL_CONN_TIMEOUT);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the peripheral preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			err = -EIO;
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = hci_conn_get(conn);

	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

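/* Common helper for the user confirmation and passkey (negative)
 * replies. LE responses are handed to SMP directly, while BR/EDR
 * responses are translated into the corresponding HCI command.
 */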
user_pairing_resp(struct sock * sk,struct hci_dev * hdev,struct mgmt_addr_info * addr,u16 mgmt_op,u16 hci_op,__le32 passkey)3682 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3683 struct mgmt_addr_info *addr, u16 mgmt_op,
3684 u16 hci_op, __le32 passkey)
3685 {
3686 struct mgmt_pending_cmd *cmd;
3687 struct hci_conn *conn;
3688 int err;
3689
3690 hci_dev_lock(hdev);
3691
3692 if (!hdev_is_powered(hdev)) {
3693 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3694 MGMT_STATUS_NOT_POWERED, addr,
3695 sizeof(*addr));
3696 goto done;
3697 }
3698
3699 if (addr->type == BDADDR_BREDR)
3700 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3701 else
3702 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3703 le_addr_type(addr->type));
3704
3705 if (!conn) {
3706 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3707 MGMT_STATUS_NOT_CONNECTED, addr,
3708 sizeof(*addr));
3709 goto done;
3710 }
3711
3712 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3713 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3714 if (!err)
3715 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3716 MGMT_STATUS_SUCCESS, addr,
3717 sizeof(*addr));
3718 else
3719 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3720 MGMT_STATUS_FAILED, addr,
3721 sizeof(*addr));
3722
3723 goto done;
3724 }
3725
3726 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3727 if (!cmd) {
3728 err = -ENOMEM;
3729 goto done;
3730 }
3731
3732 cmd->cmd_complete = addr_cmd_complete;
3733
3734 /* Continue with pairing via HCI */
3735 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3736 struct hci_cp_user_passkey_reply cp;
3737
3738 bacpy(&cp.bdaddr, &addr->bdaddr);
3739 cp.passkey = passkey;
3740 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3741 } else
3742 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3743 &addr->bdaddr);
3744
3745 if (err < 0)
3746 mgmt_pending_remove(cmd);
3747
3748 done:
3749 hci_dev_unlock(hdev);
3750 return err;
3751 }
3752
pin_code_neg_reply(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)3753 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3754 void *data, u16 len)
3755 {
3756 struct mgmt_cp_pin_code_neg_reply *cp = data;
3757
3758 bt_dev_dbg(hdev, "sock %p", sk);
3759
3760 return user_pairing_resp(sk, hdev, &cp->addr,
3761 MGMT_OP_PIN_CODE_NEG_REPLY,
3762 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3763 }
3764
user_confirm_reply(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)3765 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3766 u16 len)
3767 {
3768 struct mgmt_cp_user_confirm_reply *cp = data;
3769
3770 bt_dev_dbg(hdev, "sock %p", sk);
3771
3772 if (len != sizeof(*cp))
3773 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3774 MGMT_STATUS_INVALID_PARAMS);
3775
3776 return user_pairing_resp(sk, hdev, &cp->addr,
3777 MGMT_OP_USER_CONFIRM_REPLY,
3778 HCI_OP_USER_CONFIRM_REPLY, 0);
3779 }
3780
user_confirm_neg_reply(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)3781 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3782 void *data, u16 len)
3783 {
3784 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3785
3786 bt_dev_dbg(hdev, "sock %p", sk);
3787
3788 return user_pairing_resp(sk, hdev, &cp->addr,
3789 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3790 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3791 }
3792
3793 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3794 u16 len)
3795 {
3796 struct mgmt_cp_user_passkey_reply *cp = data;
3797
3798 bt_dev_dbg(hdev, "sock %p", sk);
3799
3800 return user_pairing_resp(sk, hdev, &cp->addr,
3801 MGMT_OP_USER_PASSKEY_REPLY,
3802 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3803 }
3804
3805 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3806 void *data, u16 len)
3807 {
3808 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3809
3810 bt_dev_dbg(hdev, "sock %p", sk);
3811
3812 return user_pairing_resp(sk, hdev, &cp->addr,
3813 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3814 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3815 }
3816
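/* Expire the current advertising instance if it advertises the data
 * selected by @flags (e.g. local name or appearance) and schedule the
 * next instance, so that the updated value gets advertised.
 */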
3817 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3818 {
3819 struct adv_info *adv_instance;
3820
3821 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3822 if (!adv_instance)
3823 return 0;
3824
3825 	/* Stop if the current instance doesn't need to be changed */
3826 if (!(adv_instance->flags & flags))
3827 return 0;
3828
3829 cancel_adv_timeout(hdev);
3830
3831 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3832 if (!adv_instance)
3833 return 0;
3834
3835 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3836
3837 return 0;
3838 }
3839
3840 static int name_changed_sync(struct hci_dev *hdev, void *data)
3841 {
3842 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3843 }
3844
3845 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3846 {
3847 struct mgmt_pending_cmd *cmd = data;
3848 struct mgmt_cp_set_local_name *cp = cmd->param;
3849 u8 status = mgmt_status(err);
3850
3851 bt_dev_dbg(hdev, "err %d", err);
3852
3853 if (err == -ECANCELED ||
3854 cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3855 return;
3856
3857 if (status) {
3858 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3859 status);
3860 } else {
3861 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3862 cp, sizeof(*cp));
3863
3864 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3865 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3866 }
3867
3868 mgmt_pending_remove(cmd);
3869 }
3870
3871 static int set_name_sync(struct hci_dev *hdev, void *data)
3872 {
3873 if (lmp_bredr_capable(hdev)) {
3874 hci_update_name_sync(hdev);
3875 hci_update_eir_sync(hdev);
3876 }
3877
3878 	/* The name is stored in the scan response data, so there is no
3879 	 * need to update the advertising data here.
3880 */
3881 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3882 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3883
3884 return 0;
3885 }
3886
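/* Handler for MGMT_OP_SET_LOCAL_NAME. An unchanged name is acknowledged
 * immediately. When the controller is powered off the name is only
 * stored and a Local Name Changed event is emitted; otherwise the
 * update is queued on hci_sync and finished in set_name_complete().
 */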
3887 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3888 u16 len)
3889 {
3890 struct mgmt_cp_set_local_name *cp = data;
3891 struct mgmt_pending_cmd *cmd;
3892 int err;
3893
3894 bt_dev_dbg(hdev, "sock %p", sk);
3895
3896 hci_dev_lock(hdev);
3897
3898 /* If the old values are the same as the new ones just return a
3899 * direct command complete event.
3900 */
3901 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3902 !memcmp(hdev->short_name, cp->short_name,
3903 sizeof(hdev->short_name))) {
3904 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3905 data, len);
3906 goto failed;
3907 }
3908
3909 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3910
3911 if (!hdev_is_powered(hdev)) {
3912 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3913
3914 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3915 data, len);
3916 if (err < 0)
3917 goto failed;
3918
3919 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3920 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3921 ext_info_changed(hdev, sk);
3922
3923 goto failed;
3924 }
3925
3926 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3927 if (!cmd)
3928 err = -ENOMEM;
3929 else
3930 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3931 set_name_complete);
3932
3933 if (err < 0) {
3934 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3935 MGMT_STATUS_FAILED);
3936
3937 if (cmd)
3938 mgmt_pending_remove(cmd);
3939
3940 goto failed;
3941 }
3942
3943 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3944
3945 failed:
3946 hci_dev_unlock(hdev);
3947 return err;
3948 }
3949
3950 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3951 {
3952 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3953 }
3954
3955 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3956 u16 len)
3957 {
3958 struct mgmt_cp_set_appearance *cp = data;
3959 u16 appearance;
3960 int err;
3961
3962 bt_dev_dbg(hdev, "sock %p", sk);
3963
3964 if (!lmp_le_capable(hdev))
3965 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3966 MGMT_STATUS_NOT_SUPPORTED);
3967
3968 appearance = le16_to_cpu(cp->appearance);
3969
3970 hci_dev_lock(hdev);
3971
3972 if (hdev->appearance != appearance) {
3973 hdev->appearance = appearance;
3974
3975 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3976 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3977 NULL);
3978
3979 ext_info_changed(hdev, sk);
3980 }
3981
3982 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3983 0);
3984
3985 hci_dev_unlock(hdev);
3986
3987 return err;
3988 }
3989
3990 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3991 void *data, u16 len)
3992 {
3993 struct mgmt_rp_get_phy_configuration rp;
3994
3995 bt_dev_dbg(hdev, "sock %p", sk);
3996
3997 hci_dev_lock(hdev);
3998
3999 memset(&rp, 0, sizeof(rp));
4000
4001 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
4002 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
4003 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
4004
4005 hci_dev_unlock(hdev);
4006
4007 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
4008 &rp, sizeof(rp));
4009 }
4010
4011 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
4012 {
4013 struct mgmt_ev_phy_configuration_changed ev;
4014
4015 memset(&ev, 0, sizeof(ev));
4016
4017 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
4018
4019 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
4020 sizeof(ev), skip);
4021 }
4022
4023 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
4024 {
4025 struct mgmt_pending_cmd *cmd = data;
4026 struct sk_buff *skb = cmd->skb;
4027 u8 status = mgmt_status(err);
4028
4029 if (err == -ECANCELED ||
4030 cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
4031 return;
4032
4033 if (!status) {
4034 if (!skb)
4035 status = MGMT_STATUS_FAILED;
4036 else if (IS_ERR(skb))
4037 status = mgmt_status(PTR_ERR(skb));
4038 else
4039 status = mgmt_status(skb->data[0]);
4040 }
4041
4042 bt_dev_dbg(hdev, "status %d", status);
4043
4044 if (status) {
4045 mgmt_cmd_status(cmd->sk, hdev->id,
4046 MGMT_OP_SET_PHY_CONFIGURATION, status);
4047 } else {
4048 mgmt_cmd_complete(cmd->sk, hdev->id,
4049 MGMT_OP_SET_PHY_CONFIGURATION, 0,
4050 NULL, 0);
4051
4052 mgmt_phy_configuration_changed(hdev, cmd->sk);
4053 }
4054
4055 if (skb && !IS_ERR(skb))
4056 kfree_skb(skb);
4057
4058 mgmt_pending_remove(cmd);
4059 }
4060
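/* Translate the selected MGMT LE PHY bits into the parameters of an HCI
 * LE Set Default PHY command. An empty TX or RX selection sets the
 * corresponding all_phys bit, telling the controller that the host has
 * no preference for that direction.
 */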
4061 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4062 {
4063 struct mgmt_pending_cmd *cmd = data;
4064 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4065 struct hci_cp_le_set_default_phy cp_phy;
4066 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4067
4068 memset(&cp_phy, 0, sizeof(cp_phy));
4069
4070 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4071 cp_phy.all_phys |= 0x01;
4072
4073 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4074 cp_phy.all_phys |= 0x02;
4075
4076 if (selected_phys & MGMT_PHY_LE_1M_TX)
4077 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4078
4079 if (selected_phys & MGMT_PHY_LE_2M_TX)
4080 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4081
4082 if (selected_phys & MGMT_PHY_LE_CODED_TX)
4083 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4084
4085 if (selected_phys & MGMT_PHY_LE_1M_RX)
4086 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4087
4088 if (selected_phys & MGMT_PHY_LE_2M_RX)
4089 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4090
4091 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4092 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4093
4094 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4095 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4096
4097 return 0;
4098 }
4099
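/* Handler for MGMT_OP_SET_PHY_CONFIGURATION. The BR/EDR selection is
 * applied by rewriting hdev->pkt_type, where the DM/DH bits enable
 * packet types but the 2DH/3DH bits are inverted and *disable* the
 * corresponding EDR packet types when set. A changed LE selection is
 * applied asynchronously via set_default_phy_sync().
 */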
4100 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4101 void *data, u16 len)
4102 {
4103 struct mgmt_cp_set_phy_configuration *cp = data;
4104 struct mgmt_pending_cmd *cmd;
4105 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4106 u16 pkt_type = (HCI_DH1 | HCI_DM1);
4107 bool changed = false;
4108 int err;
4109
4110 bt_dev_dbg(hdev, "sock %p", sk);
4111
4112 configurable_phys = get_configurable_phys(hdev);
4113 supported_phys = get_supported_phys(hdev);
4114 selected_phys = __le32_to_cpu(cp->selected_phys);
4115
4116 if (selected_phys & ~supported_phys)
4117 return mgmt_cmd_status(sk, hdev->id,
4118 MGMT_OP_SET_PHY_CONFIGURATION,
4119 MGMT_STATUS_INVALID_PARAMS);
4120
4121 unconfigure_phys = supported_phys & ~configurable_phys;
4122
4123 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4124 return mgmt_cmd_status(sk, hdev->id,
4125 MGMT_OP_SET_PHY_CONFIGURATION,
4126 MGMT_STATUS_INVALID_PARAMS);
4127
4128 if (selected_phys == get_selected_phys(hdev))
4129 return mgmt_cmd_complete(sk, hdev->id,
4130 MGMT_OP_SET_PHY_CONFIGURATION,
4131 0, NULL, 0);
4132
4133 hci_dev_lock(hdev);
4134
4135 if (!hdev_is_powered(hdev)) {
4136 err = mgmt_cmd_status(sk, hdev->id,
4137 MGMT_OP_SET_PHY_CONFIGURATION,
4138 MGMT_STATUS_REJECTED);
4139 goto unlock;
4140 }
4141
4142 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4143 err = mgmt_cmd_status(sk, hdev->id,
4144 MGMT_OP_SET_PHY_CONFIGURATION,
4145 MGMT_STATUS_BUSY);
4146 goto unlock;
4147 }
4148
4149 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4150 pkt_type |= (HCI_DH3 | HCI_DM3);
4151 else
4152 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4153
4154 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4155 pkt_type |= (HCI_DH5 | HCI_DM5);
4156 else
4157 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4158
4159 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4160 pkt_type &= ~HCI_2DH1;
4161 else
4162 pkt_type |= HCI_2DH1;
4163
4164 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4165 pkt_type &= ~HCI_2DH3;
4166 else
4167 pkt_type |= HCI_2DH3;
4168
4169 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4170 pkt_type &= ~HCI_2DH5;
4171 else
4172 pkt_type |= HCI_2DH5;
4173
4174 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4175 pkt_type &= ~HCI_3DH1;
4176 else
4177 pkt_type |= HCI_3DH1;
4178
4179 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4180 pkt_type &= ~HCI_3DH3;
4181 else
4182 pkt_type |= HCI_3DH3;
4183
4184 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4185 pkt_type &= ~HCI_3DH5;
4186 else
4187 pkt_type |= HCI_3DH5;
4188
4189 if (pkt_type != hdev->pkt_type) {
4190 hdev->pkt_type = pkt_type;
4191 changed = true;
4192 }
4193
4194 if ((selected_phys & MGMT_PHY_LE_MASK) ==
4195 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4196 if (changed)
4197 mgmt_phy_configuration_changed(hdev, sk);
4198
4199 err = mgmt_cmd_complete(sk, hdev->id,
4200 MGMT_OP_SET_PHY_CONFIGURATION,
4201 0, NULL, 0);
4202
4203 goto unlock;
4204 }
4205
4206 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4207 len);
4208 if (!cmd)
4209 err = -ENOMEM;
4210 else
4211 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4212 set_default_phy_complete);
4213
4214 if (err < 0) {
4215 err = mgmt_cmd_status(sk, hdev->id,
4216 MGMT_OP_SET_PHY_CONFIGURATION,
4217 MGMT_STATUS_FAILED);
4218
4219 if (cmd)
4220 mgmt_pending_remove(cmd);
4221 }
4222
4223 unlock:
4224 hci_dev_unlock(hdev);
4225
4226 return err;
4227 }
4228
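/* Handler for MGMT_OP_SET_BLOCKED_KEYS. Replaces the current list of
 * blocked keys with the supplied one, after checking that the declared
 * key count matches the actual payload length.
 */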
4229 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4230 u16 len)
4231 {
4232 int err = MGMT_STATUS_SUCCESS;
4233 struct mgmt_cp_set_blocked_keys *keys = data;
4234 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4235 sizeof(struct mgmt_blocked_key_info));
4236 u16 key_count, expected_len;
4237 int i;
4238
4239 bt_dev_dbg(hdev, "sock %p", sk);
4240
4241 key_count = __le16_to_cpu(keys->key_count);
4242 if (key_count > max_key_count) {
4243 bt_dev_err(hdev, "too big key_count value %u", key_count);
4244 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4245 MGMT_STATUS_INVALID_PARAMS);
4246 }
4247
4248 expected_len = struct_size(keys, keys, key_count);
4249 if (expected_len != len) {
4250 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4251 expected_len, len);
4252 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4253 MGMT_STATUS_INVALID_PARAMS);
4254 }
4255
4256 hci_dev_lock(hdev);
4257
4258 hci_blocked_keys_clear(hdev);
4259
4260 for (i = 0; i < key_count; ++i) {
4261 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4262
4263 if (!b) {
4264 err = MGMT_STATUS_NO_RESOURCES;
4265 break;
4266 }
4267
4268 b->type = keys->keys[i].type;
4269 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4270 list_add_rcu(&b->list, &hdev->blocked_keys);
4271 }
4272 hci_dev_unlock(hdev);
4273
4274 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4275 err, NULL, 0);
4276 }
4277
4278 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4279 void *data, u16 len)
4280 {
4281 struct mgmt_mode *cp = data;
4282 int err;
4283 bool changed = false;
4284
4285 bt_dev_dbg(hdev, "sock %p", sk);
4286
4287 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4288 return mgmt_cmd_status(sk, hdev->id,
4289 MGMT_OP_SET_WIDEBAND_SPEECH,
4290 MGMT_STATUS_NOT_SUPPORTED);
4291
4292 if (cp->val != 0x00 && cp->val != 0x01)
4293 return mgmt_cmd_status(sk, hdev->id,
4294 MGMT_OP_SET_WIDEBAND_SPEECH,
4295 MGMT_STATUS_INVALID_PARAMS);
4296
4297 hci_dev_lock(hdev);
4298
4299 if (hdev_is_powered(hdev) &&
4300 !!cp->val != hci_dev_test_flag(hdev,
4301 HCI_WIDEBAND_SPEECH_ENABLED)) {
4302 err = mgmt_cmd_status(sk, hdev->id,
4303 MGMT_OP_SET_WIDEBAND_SPEECH,
4304 MGMT_STATUS_REJECTED);
4305 goto unlock;
4306 }
4307
4308 if (cp->val)
4309 changed = !hci_dev_test_and_set_flag(hdev,
4310 HCI_WIDEBAND_SPEECH_ENABLED);
4311 else
4312 changed = hci_dev_test_and_clear_flag(hdev,
4313 HCI_WIDEBAND_SPEECH_ENABLED);
4314
4315 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4316 if (err < 0)
4317 goto unlock;
4318
4319 if (changed)
4320 err = new_settings(hdev, sk);
4321
4322 unlock:
4323 hci_dev_unlock(hdev);
4324 return err;
4325 }
4326
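/* Handler for MGMT_OP_READ_CONTROLLER_CAP. Builds an EIR-encoded list
 * of security capabilities: public key validation and encryption key
 * size enforcement flags, the maximum encryption key sizes and, when
 * the controller reports it, the LE TX power range.
 */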
4327 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4328 void *data, u16 data_len)
4329 {
4330 char buf[20];
4331 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4332 u16 cap_len = 0;
4333 u8 flags = 0;
4334 u8 tx_power_range[2];
4335
4336 bt_dev_dbg(hdev, "sock %p", sk);
4337
4338 memset(&buf, 0, sizeof(buf));
4339
4340 hci_dev_lock(hdev);
4341
4342 /* When the Read Simple Pairing Options command is supported, then
4343 * the remote public key validation is supported.
4344 *
4345 * Alternatively, when Microsoft extensions are available, they can
4346 * indicate support for public key validation as well.
4347 */
4348 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4349 flags |= 0x01; /* Remote public key validation (BR/EDR) */
4350
4351 flags |= 0x02; /* Remote public key validation (LE) */
4352
4353 /* When the Read Encryption Key Size command is supported, then the
4354 * encryption key size is enforced.
4355 */
4356 if (hdev->commands[20] & 0x10)
4357 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
4358
4359 flags |= 0x08; /* Encryption key size enforcement (LE) */
4360
4361 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4362 &flags, 1);
4363
4364 	/* When the Read Simple Pairing Options command is supported, the
4365 	 * maximum encryption key size information is also provided.
4366 */
4367 if (hdev->commands[41] & 0x08)
4368 cap_len = eir_append_le16(rp->cap, cap_len,
4369 MGMT_CAP_MAX_ENC_KEY_SIZE,
4370 hdev->max_enc_key_size);
4371
4372 cap_len = eir_append_le16(rp->cap, cap_len,
4373 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4374 SMP_MAX_ENC_KEY_SIZE);
4375
4376 /* Append the min/max LE tx power parameters if we were able to fetch
4377 	 * them from the controller
4378 */
4379 if (hdev->commands[38] & 0x80) {
4380 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4381 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4382 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4383 tx_power_range, 2);
4384 }
4385
4386 rp->cap_len = cpu_to_le16(cap_len);
4387
4388 hci_dev_unlock(hdev);
4389
4390 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4391 rp, sizeof(*rp) + cap_len);
4392 }
4393
4394 #ifdef CONFIG_BT_FEATURE_DEBUG
4395 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4396 static const u8 debug_uuid[16] = {
4397 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4398 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4399 };
4400 #endif
4401
4402 /* 330859bc-7506-492d-9370-9a6f0614037f */
4403 static const u8 quality_report_uuid[16] = {
4404 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4405 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4406 };
4407
4408 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4409 static const u8 offload_codecs_uuid[16] = {
4410 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4411 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4412 };
4413
4414 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4415 static const u8 le_simultaneous_roles_uuid[16] = {
4416 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4417 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4418 };
4419
4420 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4421 static const u8 iso_socket_uuid[16] = {
4422 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4423 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4424 };
4425
4426 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4427 static const u8 mgmt_mesh_uuid[16] = {
4428 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4429 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4430 };
4431
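/* Handler for MGMT_OP_READ_EXP_FEATURES_INFO. A NULL @hdev means the
 * command was sent to the non-controller index, which only reports the
 * debug and ISO socket features; all other features depend on
 * per-controller support.
 */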
4432 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4433 void *data, u16 data_len)
4434 {
4435 struct mgmt_rp_read_exp_features_info *rp;
4436 size_t len;
4437 u16 idx = 0;
4438 u32 flags;
4439 int status;
4440
4441 bt_dev_dbg(hdev, "sock %p", sk);
4442
4443 /* Enough space for 7 features */
4444 len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4445 rp = kzalloc(len, GFP_KERNEL);
4446 if (!rp)
4447 return -ENOMEM;
4448
4449 #ifdef CONFIG_BT_FEATURE_DEBUG
4450 if (!hdev) {
4451 flags = bt_dbg_get() ? BIT(0) : 0;
4452
4453 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4454 rp->features[idx].flags = cpu_to_le32(flags);
4455 idx++;
4456 }
4457 #endif
4458
4459 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4460 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4461 flags = BIT(0);
4462 else
4463 flags = 0;
4464
4465 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4466 rp->features[idx].flags = cpu_to_le32(flags);
4467 idx++;
4468 }
4469
4470 if (hdev && (aosp_has_quality_report(hdev) ||
4471 hdev->set_quality_report)) {
4472 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4473 flags = BIT(0);
4474 else
4475 flags = 0;
4476
4477 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4478 rp->features[idx].flags = cpu_to_le32(flags);
4479 idx++;
4480 }
4481
4482 if (hdev && hdev->get_data_path_id) {
4483 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4484 flags = BIT(0);
4485 else
4486 flags = 0;
4487
4488 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4489 rp->features[idx].flags = cpu_to_le32(flags);
4490 idx++;
4491 }
4492
4493 if (IS_ENABLED(CONFIG_BT_LE)) {
4494 flags = iso_enabled() ? BIT(0) : 0;
4495 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4496 rp->features[idx].flags = cpu_to_le32(flags);
4497 idx++;
4498 }
4499
4500 if (hdev && lmp_le_capable(hdev)) {
4501 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4502 flags = BIT(0);
4503 else
4504 flags = 0;
4505
4506 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4507 rp->features[idx].flags = cpu_to_le32(flags);
4508 idx++;
4509 }
4510
4511 rp->feature_count = cpu_to_le16(idx);
4512
4513 /* After reading the experimental features information, enable
4514 * the events to update client on any future change.
4515 */
4516 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4517
4518 status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4519 MGMT_OP_READ_EXP_FEATURES_INFO,
4520 0, rp, sizeof(*rp) + (20 * idx));
4521
4522 kfree(rp);
4523 return status;
4524 }
4525
4526 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4527 bool enabled, struct sock *skip)
4528 {
4529 struct mgmt_ev_exp_feature_changed ev;
4530
4531 memset(&ev, 0, sizeof(ev));
4532 memcpy(ev.uuid, uuid, 16);
4533 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4534
4535 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4536 &ev, sizeof(ev),
4537 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4538 }
4539
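/* Experimental features are dispatched by UUID: set_exp_feature() walks
 * the exp_features[] table defined below and calls the set_func of the
 * first entry whose UUID matches the request.
 */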
4540 #define EXP_FEAT(_uuid, _set_func) \
4541 { \
4542 .uuid = _uuid, \
4543 .set_func = _set_func, \
4544 }
4545
4546 /* The zero key uuid is special. Multiple exp features are set through it. */
4547 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4548 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4549 {
4550 struct mgmt_rp_set_exp_feature rp;
4551
4552 memset(rp.uuid, 0, 16);
4553 rp.flags = cpu_to_le32(0);
4554
4555 #ifdef CONFIG_BT_FEATURE_DEBUG
4556 if (!hdev) {
4557 bool changed = bt_dbg_get();
4558
4559 bt_dbg_set(false);
4560
4561 if (changed)
4562 exp_feature_changed(NULL, ZERO_KEY, false, sk);
4563 }
4564 #endif
4565
4566 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4567
4568 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4569 MGMT_OP_SET_EXP_FEATURE, 0,
4570 &rp, sizeof(rp));
4571 }
4572
4573 #ifdef CONFIG_BT_FEATURE_DEBUG
4574 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4575 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4576 {
4577 struct mgmt_rp_set_exp_feature rp;
4578
4579 bool val, changed;
4580 int err;
4581
4582 	/* Command requires the non-controller index */
4583 if (hdev)
4584 return mgmt_cmd_status(sk, hdev->id,
4585 MGMT_OP_SET_EXP_FEATURE,
4586 MGMT_STATUS_INVALID_INDEX);
4587
4588 /* Parameters are limited to a single octet */
4589 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4590 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4591 MGMT_OP_SET_EXP_FEATURE,
4592 MGMT_STATUS_INVALID_PARAMS);
4593
4594 /* Only boolean on/off is supported */
4595 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4596 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4597 MGMT_OP_SET_EXP_FEATURE,
4598 MGMT_STATUS_INVALID_PARAMS);
4599
4600 val = !!cp->param[0];
4601 changed = val ? !bt_dbg_get() : bt_dbg_get();
4602 bt_dbg_set(val);
4603
4604 memcpy(rp.uuid, debug_uuid, 16);
4605 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4606
4607 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4608
4609 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4610 MGMT_OP_SET_EXP_FEATURE, 0,
4611 &rp, sizeof(rp));
4612
4613 if (changed)
4614 exp_feature_changed(hdev, debug_uuid, val, sk);
4615
4616 return err;
4617 }
4618 #endif
4619
4620 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4621 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4622 {
4623 struct mgmt_rp_set_exp_feature rp;
4624 bool val, changed;
4625 int err;
4626
4627 	/* Command requires the controller index */
4628 if (!hdev)
4629 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4630 MGMT_OP_SET_EXP_FEATURE,
4631 MGMT_STATUS_INVALID_INDEX);
4632
4633 /* Parameters are limited to a single octet */
4634 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4635 return mgmt_cmd_status(sk, hdev->id,
4636 MGMT_OP_SET_EXP_FEATURE,
4637 MGMT_STATUS_INVALID_PARAMS);
4638
4639 /* Only boolean on/off is supported */
4640 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4641 return mgmt_cmd_status(sk, hdev->id,
4642 MGMT_OP_SET_EXP_FEATURE,
4643 MGMT_STATUS_INVALID_PARAMS);
4644
4645 val = !!cp->param[0];
4646
4647 if (val) {
4648 changed = !hci_dev_test_and_set_flag(hdev,
4649 HCI_MESH_EXPERIMENTAL);
4650 } else {
4651 hci_dev_clear_flag(hdev, HCI_MESH);
4652 changed = hci_dev_test_and_clear_flag(hdev,
4653 HCI_MESH_EXPERIMENTAL);
4654 }
4655
4656 memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4657 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4658
4659 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4660
4661 err = mgmt_cmd_complete(sk, hdev->id,
4662 MGMT_OP_SET_EXP_FEATURE, 0,
4663 &rp, sizeof(rp));
4664
4665 if (changed)
4666 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4667
4668 return err;
4669 }
4670
4671 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4672 struct mgmt_cp_set_exp_feature *cp,
4673 u16 data_len)
4674 {
4675 struct mgmt_rp_set_exp_feature rp;
4676 bool val, changed;
4677 int err;
4678
4679 	/* Command requires a valid controller index */
4680 if (!hdev)
4681 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4682 MGMT_OP_SET_EXP_FEATURE,
4683 MGMT_STATUS_INVALID_INDEX);
4684
4685 /* Parameters are limited to a single octet */
4686 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4687 return mgmt_cmd_status(sk, hdev->id,
4688 MGMT_OP_SET_EXP_FEATURE,
4689 MGMT_STATUS_INVALID_PARAMS);
4690
4691 /* Only boolean on/off is supported */
4692 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4693 return mgmt_cmd_status(sk, hdev->id,
4694 MGMT_OP_SET_EXP_FEATURE,
4695 MGMT_STATUS_INVALID_PARAMS);
4696
4697 hci_req_sync_lock(hdev);
4698
4699 val = !!cp->param[0];
4700 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4701
4702 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4703 err = mgmt_cmd_status(sk, hdev->id,
4704 MGMT_OP_SET_EXP_FEATURE,
4705 MGMT_STATUS_NOT_SUPPORTED);
4706 goto unlock_quality_report;
4707 }
4708
4709 if (changed) {
4710 if (hdev->set_quality_report)
4711 err = hdev->set_quality_report(hdev, val);
4712 else
4713 err = aosp_set_quality_report(hdev, val);
4714
4715 if (err) {
4716 err = mgmt_cmd_status(sk, hdev->id,
4717 MGMT_OP_SET_EXP_FEATURE,
4718 MGMT_STATUS_FAILED);
4719 goto unlock_quality_report;
4720 }
4721
4722 if (val)
4723 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4724 else
4725 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4726 }
4727
4728 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4729
4730 memcpy(rp.uuid, quality_report_uuid, 16);
4731 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4732 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4733
4734 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4735 &rp, sizeof(rp));
4736
4737 if (changed)
4738 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4739
4740 unlock_quality_report:
4741 hci_req_sync_unlock(hdev);
4742 return err;
4743 }
4744
4745 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4746 struct mgmt_cp_set_exp_feature *cp,
4747 u16 data_len)
4748 {
4749 bool val, changed;
4750 int err;
4751 struct mgmt_rp_set_exp_feature rp;
4752
4753 	/* Command requires a valid controller index */
4754 if (!hdev)
4755 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4756 MGMT_OP_SET_EXP_FEATURE,
4757 MGMT_STATUS_INVALID_INDEX);
4758
4759 /* Parameters are limited to a single octet */
4760 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4761 return mgmt_cmd_status(sk, hdev->id,
4762 MGMT_OP_SET_EXP_FEATURE,
4763 MGMT_STATUS_INVALID_PARAMS);
4764
4765 /* Only boolean on/off is supported */
4766 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4767 return mgmt_cmd_status(sk, hdev->id,
4768 MGMT_OP_SET_EXP_FEATURE,
4769 MGMT_STATUS_INVALID_PARAMS);
4770
4771 val = !!cp->param[0];
4772 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4773
4774 if (!hdev->get_data_path_id) {
4775 return mgmt_cmd_status(sk, hdev->id,
4776 MGMT_OP_SET_EXP_FEATURE,
4777 MGMT_STATUS_NOT_SUPPORTED);
4778 }
4779
4780 if (changed) {
4781 if (val)
4782 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4783 else
4784 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4785 }
4786
4787 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4788 val, changed);
4789
4790 memcpy(rp.uuid, offload_codecs_uuid, 16);
4791 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4792 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4793 err = mgmt_cmd_complete(sk, hdev->id,
4794 MGMT_OP_SET_EXP_FEATURE, 0,
4795 &rp, sizeof(rp));
4796
4797 if (changed)
4798 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4799
4800 return err;
4801 }
4802
4803 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4804 struct mgmt_cp_set_exp_feature *cp,
4805 u16 data_len)
4806 {
4807 bool val, changed;
4808 int err;
4809 struct mgmt_rp_set_exp_feature rp;
4810
4811 	/* Command requires a valid controller index */
4812 if (!hdev)
4813 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4814 MGMT_OP_SET_EXP_FEATURE,
4815 MGMT_STATUS_INVALID_INDEX);
4816
4817 /* Parameters are limited to a single octet */
4818 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4819 return mgmt_cmd_status(sk, hdev->id,
4820 MGMT_OP_SET_EXP_FEATURE,
4821 MGMT_STATUS_INVALID_PARAMS);
4822
4823 /* Only boolean on/off is supported */
4824 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4825 return mgmt_cmd_status(sk, hdev->id,
4826 MGMT_OP_SET_EXP_FEATURE,
4827 MGMT_STATUS_INVALID_PARAMS);
4828
4829 val = !!cp->param[0];
4830 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4831
4832 if (!hci_dev_le_state_simultaneous(hdev)) {
4833 return mgmt_cmd_status(sk, hdev->id,
4834 MGMT_OP_SET_EXP_FEATURE,
4835 MGMT_STATUS_NOT_SUPPORTED);
4836 }
4837
4838 if (changed) {
4839 if (val)
4840 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4841 else
4842 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4843 }
4844
4845 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4846 val, changed);
4847
4848 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4849 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4850 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4851 err = mgmt_cmd_complete(sk, hdev->id,
4852 MGMT_OP_SET_EXP_FEATURE, 0,
4853 &rp, sizeof(rp));
4854
4855 if (changed)
4856 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4857
4858 return err;
4859 }
4860
4861 #ifdef CONFIG_BT_LE
4862 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4863 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4864 {
4865 struct mgmt_rp_set_exp_feature rp;
4866 bool val, changed = false;
4867 int err;
4868
4869 	/* Command requires the non-controller index */
4870 if (hdev)
4871 return mgmt_cmd_status(sk, hdev->id,
4872 MGMT_OP_SET_EXP_FEATURE,
4873 MGMT_STATUS_INVALID_INDEX);
4874
4875 /* Parameters are limited to a single octet */
4876 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4877 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4878 MGMT_OP_SET_EXP_FEATURE,
4879 MGMT_STATUS_INVALID_PARAMS);
4880
4881 /* Only boolean on/off is supported */
4882 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4883 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4884 MGMT_OP_SET_EXP_FEATURE,
4885 MGMT_STATUS_INVALID_PARAMS);
4886
4887 	val = !!cp->param[0];
4888 if (val)
4889 err = iso_init();
4890 else
4891 err = iso_exit();
4892
4893 if (!err)
4894 changed = true;
4895
4896 memcpy(rp.uuid, iso_socket_uuid, 16);
4897 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4898
4899 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4900
4901 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4902 MGMT_OP_SET_EXP_FEATURE, 0,
4903 &rp, sizeof(rp));
4904
4905 if (changed)
4906 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4907
4908 return err;
4909 }
4910 #endif
4911
4912 static const struct mgmt_exp_feature {
4913 const u8 *uuid;
4914 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4915 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4916 } exp_features[] = {
4917 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4918 #ifdef CONFIG_BT_FEATURE_DEBUG
4919 EXP_FEAT(debug_uuid, set_debug_func),
4920 #endif
4921 EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
4922 EXP_FEAT(quality_report_uuid, set_quality_report_func),
4923 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
4924 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
4925 #ifdef CONFIG_BT_LE
4926 EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
4927 #endif
4928
4929 /* end with a null feature */
4930 EXP_FEAT(NULL, NULL)
4931 };
4932
4933 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4934 void *data, u16 data_len)
4935 {
4936 struct mgmt_cp_set_exp_feature *cp = data;
4937 size_t i = 0;
4938
4939 bt_dev_dbg(hdev, "sock %p", sk);
4940
4941 for (i = 0; exp_features[i].uuid; i++) {
4942 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4943 return exp_features[i].set_func(sk, hdev, cp, data_len);
4944 }
4945
4946 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4947 MGMT_OP_SET_EXP_FEATURE,
4948 MGMT_STATUS_NOT_SUPPORTED);
4949 }
4950
4951 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4952 u16 data_len)
4953 {
4954 struct mgmt_cp_get_device_flags *cp = data;
4955 struct mgmt_rp_get_device_flags rp;
4956 struct bdaddr_list_with_flags *br_params;
4957 struct hci_conn_params *params;
4958 u32 supported_flags;
4959 u32 current_flags = 0;
4960 u8 status = MGMT_STATUS_INVALID_PARAMS;
4961
4962 	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
4963 &cp->addr.bdaddr, cp->addr.type);
4964
4965 hci_dev_lock(hdev);
4966
4967 supported_flags = hdev->conn_flags;
4968
4969 memset(&rp, 0, sizeof(rp));
4970
4971 if (cp->addr.type == BDADDR_BREDR) {
4972 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4973 &cp->addr.bdaddr,
4974 cp->addr.type);
4975 if (!br_params)
4976 goto done;
4977
4978 current_flags = br_params->flags;
4979 } else {
4980 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4981 le_addr_type(cp->addr.type));
4982 if (!params)
4983 goto done;
4984
4985 current_flags = params->flags;
4986 }
4987
4988 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4989 rp.addr.type = cp->addr.type;
4990 rp.supported_flags = cpu_to_le32(supported_flags);
4991 rp.current_flags = cpu_to_le32(current_flags);
4992
4993 status = MGMT_STATUS_SUCCESS;
4994
4995 done:
4996 hci_dev_unlock(hdev);
4997
4998 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
4999 &rp, sizeof(rp));
5000 }
5001
5002 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5003 bdaddr_t *bdaddr, u8 bdaddr_type,
5004 u32 supported_flags, u32 current_flags)
5005 {
5006 struct mgmt_ev_device_flags_changed ev;
5007
5008 bacpy(&ev.addr.bdaddr, bdaddr);
5009 ev.addr.type = bdaddr_type;
5010 ev.supported_flags = cpu_to_le32(supported_flags);
5011 ev.current_flags = cpu_to_le32(current_flags);
5012
5013 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5014 }
5015
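/* Handler for MGMT_OP_SET_DEVICE_FLAGS. The device is looked up in the
 * accept list for BR/EDR and in the connection parameters for LE; on
 * success the new flags are stored and a Device Flags Changed event is
 * sent to all other mgmt sockets.
 */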
5016 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5017 u16 len)
5018 {
5019 struct mgmt_cp_set_device_flags *cp = data;
5020 struct bdaddr_list_with_flags *br_params;
5021 struct hci_conn_params *params;
5022 u8 status = MGMT_STATUS_INVALID_PARAMS;
5023 u32 supported_flags;
5024 u32 current_flags = __le32_to_cpu(cp->current_flags);
5025
5026 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5027 &cp->addr.bdaddr, cp->addr.type, current_flags);
5028
5029 	/* conn_flags can change, so the supported-flags check is repeated under hci_dev_lock() below */
5030 supported_flags = hdev->conn_flags;
5031
5032 if ((supported_flags | current_flags) != supported_flags) {
5033 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%x)",
5034 current_flags, supported_flags);
5035 goto done;
5036 }
5037
5038 hci_dev_lock(hdev);
5039
5040 if (cp->addr.type == BDADDR_BREDR) {
5041 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5042 &cp->addr.bdaddr,
5043 cp->addr.type);
5044
5045 if (br_params) {
5046 br_params->flags = current_flags;
5047 status = MGMT_STATUS_SUCCESS;
5048 } else {
5049 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5050 &cp->addr.bdaddr, cp->addr.type);
5051 }
5052
5053 goto unlock;
5054 }
5055
5056 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5057 le_addr_type(cp->addr.type));
5058 if (!params) {
5059 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5060 &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5061 goto unlock;
5062 }
5063
5064 supported_flags = hdev->conn_flags;
5065
5066 if ((supported_flags | current_flags) != supported_flags) {
5067 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%x)",
5068 current_flags, supported_flags);
5069 goto unlock;
5070 }
5071
5072 WRITE_ONCE(params->flags, current_flags);
5073 status = MGMT_STATUS_SUCCESS;
5074
5075 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5076 * has been set.
5077 */
5078 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5079 hci_update_passive_scan(hdev);
5080
5081 unlock:
5082 hci_dev_unlock(hdev);
5083
5084 done:
5085 if (status == MGMT_STATUS_SUCCESS)
5086 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5087 supported_flags, current_flags);
5088
5089 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5090 &cp->addr, sizeof(cp->addr));
5091 }
5092
5093 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5094 u16 handle)
5095 {
5096 struct mgmt_ev_adv_monitor_added ev;
5097
5098 ev.monitor_handle = cpu_to_le16(handle);
5099
5100 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5101 }
5102
5103 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5104 {
5105 struct mgmt_ev_adv_monitor_removed ev;
5106 struct mgmt_pending_cmd *cmd;
5107 struct sock *sk_skip = NULL;
5108 struct mgmt_cp_remove_adv_monitor *cp;
5109
5110 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5111 if (cmd) {
5112 cp = cmd->param;
5113
5114 if (cp->monitor_handle)
5115 sk_skip = cmd->sk;
5116 }
5117
5118 ev.monitor_handle = cpu_to_le16(handle);
5119
5120 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
5121 }
5122
5123 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5124 void *data, u16 len)
5125 {
5126 struct adv_monitor *monitor = NULL;
5127 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5128 int handle, err;
5129 size_t rp_size = 0;
5130 __u32 supported = 0;
5131 __u32 enabled = 0;
5132 __u16 num_handles = 0;
5133 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5134
5135 BT_DBG("request for %s", hdev->name);
5136
5137 hci_dev_lock(hdev);
5138
5139 if (msft_monitor_supported(hdev))
5140 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5141
5142 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5143 handles[num_handles++] = monitor->handle;
5144
5145 hci_dev_unlock(hdev);
5146
5147 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5148 rp = kmalloc(rp_size, GFP_KERNEL);
5149 if (!rp)
5150 return -ENOMEM;
5151
5152 /* All supported features are currently enabled */
5153 enabled = supported;
5154
5155 rp->supported_features = cpu_to_le32(supported);
5156 rp->enabled_features = cpu_to_le32(enabled);
5157 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5158 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5159 rp->num_handles = cpu_to_le16(num_handles);
5160 if (num_handles)
5161 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5162
5163 err = mgmt_cmd_complete(sk, hdev->id,
5164 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5165 MGMT_STATUS_SUCCESS, rp, rp_size);
5166
5167 kfree(rp);
5168
5169 return err;
5170 }
5171
5172 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5173 void *data, int status)
5174 {
5175 struct mgmt_rp_add_adv_patterns_monitor rp;
5176 struct mgmt_pending_cmd *cmd = data;
5177 struct adv_monitor *monitor = cmd->user_data;
5178
5179 hci_dev_lock(hdev);
5180
5181 rp.monitor_handle = cpu_to_le16(monitor->handle);
5182
5183 if (!status) {
5184 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5185 hdev->adv_monitors_cnt++;
5186 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5187 monitor->state = ADV_MONITOR_STATE_REGISTERED;
5188 hci_update_passive_scan(hdev);
5189 }
5190
5191 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5192 mgmt_status(status), &rp, sizeof(rp));
5193 mgmt_pending_remove(cmd);
5194
5195 hci_dev_unlock(hdev);
5196 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5197 rp.monitor_handle, status);
5198 }
5199
5200 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5201 {
5202 struct mgmt_pending_cmd *cmd = data;
5203 struct adv_monitor *monitor = cmd->user_data;
5204
5205 return hci_add_adv_monitor(hdev, monitor);
5206 }
5207
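/* Common tail of both Add Advertisement Patterns Monitor variants:
 * rejects the request while another monitor (or SET_LE) operation is
 * pending, otherwise queues hci_add_adv_monitor() on hci_sync. On any
 * failure the monitor is freed here.
 */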
5208 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5209 struct adv_monitor *m, u8 status,
5210 void *data, u16 len, u16 op)
5211 {
5212 struct mgmt_pending_cmd *cmd;
5213 int err;
5214
5215 hci_dev_lock(hdev);
5216
5217 if (status)
5218 goto unlock;
5219
5220 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5221 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5222 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5223 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5224 status = MGMT_STATUS_BUSY;
5225 goto unlock;
5226 }
5227
5228 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5229 if (!cmd) {
5230 status = MGMT_STATUS_NO_RESOURCES;
5231 goto unlock;
5232 }
5233
5234 cmd->user_data = m;
5235 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5236 mgmt_add_adv_patterns_monitor_complete);
5237 if (err) {
5238 if (err == -ENOMEM)
5239 status = MGMT_STATUS_NO_RESOURCES;
5240 else
5241 status = MGMT_STATUS_FAILED;
5242
5243 goto unlock;
5244 }
5245
5246 hci_dev_unlock(hdev);
5247
5248 return 0;
5249
5250 unlock:
5251 hci_free_adv_monitor(hdev, m);
5252 hci_dev_unlock(hdev);
5253 return mgmt_cmd_status(sk, hdev->id, op, status);
5254 }
5255
5256 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5257 struct mgmt_adv_rssi_thresholds *rssi)
5258 {
5259 if (rssi) {
5260 m->rssi.low_threshold = rssi->low_threshold;
5261 m->rssi.low_threshold_timeout =
5262 __le16_to_cpu(rssi->low_threshold_timeout);
5263 m->rssi.high_threshold = rssi->high_threshold;
5264 m->rssi.high_threshold_timeout =
5265 __le16_to_cpu(rssi->high_threshold_timeout);
5266 m->rssi.sampling_period = rssi->sampling_period;
5267 } else {
5268 		/* Default values. These numbers are the least constricting
5269 		 * parameters for the MSFT API to work, so it behaves as if
5270 		 * there are no RSSI parameters to consider. May need to be
5271 		 * changed if other APIs are to be supported.
5272 		 */
5273 m->rssi.low_threshold = -127;
5274 m->rssi.low_threshold_timeout = 60;
5275 m->rssi.high_threshold = -127;
5276 m->rssi.high_threshold_timeout = 0;
5277 m->rssi.sampling_period = 0;
5278 }
5279 }
5280
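/* Copy the patterns of an Add Advertisement Patterns Monitor command
 * into the monitor, rejecting any pattern whose offset/length window
 * does not fit within the maximum extended advertising data length.
 */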
5281 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5282 struct mgmt_adv_pattern *patterns)
5283 {
5284 u8 offset = 0, length = 0;
5285 struct adv_pattern *p = NULL;
5286 int i;
5287
5288 for (i = 0; i < pattern_count; i++) {
5289 offset = patterns[i].offset;
5290 length = patterns[i].length;
5291 if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5292 length > HCI_MAX_EXT_AD_LENGTH ||
5293 (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5294 return MGMT_STATUS_INVALID_PARAMS;
5295
5296 p = kmalloc(sizeof(*p), GFP_KERNEL);
5297 if (!p)
5298 return MGMT_STATUS_NO_RESOURCES;
5299
5300 p->ad_type = patterns[i].ad_type;
5301 p->offset = patterns[i].offset;
5302 p->length = patterns[i].length;
5303 memcpy(p->value, patterns[i].value, p->length);
5304
5305 INIT_LIST_HEAD(&p->list);
5306 list_add(&p->list, &m->patterns);
5307 }
5308
5309 return MGMT_STATUS_SUCCESS;
5310 }
5311
5312 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5313 void *data, u16 len)
5314 {
5315 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5316 struct adv_monitor *m = NULL;
5317 u8 status = MGMT_STATUS_SUCCESS;
5318 size_t expected_size = sizeof(*cp);
5319
5320 BT_DBG("request for %s", hdev->name);
5321
5322 if (len <= sizeof(*cp)) {
5323 status = MGMT_STATUS_INVALID_PARAMS;
5324 goto done;
5325 }
5326
5327 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5328 if (len != expected_size) {
5329 status = MGMT_STATUS_INVALID_PARAMS;
5330 goto done;
5331 }
5332
5333 m = kzalloc(sizeof(*m), GFP_KERNEL);
5334 if (!m) {
5335 status = MGMT_STATUS_NO_RESOURCES;
5336 goto done;
5337 }
5338
5339 INIT_LIST_HEAD(&m->patterns);
5340
5341 parse_adv_monitor_rssi(m, NULL);
5342 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5343
5344 done:
5345 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5346 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5347 }
5348
5349 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5350 void *data, u16 len)
5351 {
5352 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5353 struct adv_monitor *m = NULL;
5354 u8 status = MGMT_STATUS_SUCCESS;
5355 size_t expected_size = sizeof(*cp);
5356
5357 BT_DBG("request for %s", hdev->name);
5358
5359 if (len <= sizeof(*cp)) {
5360 status = MGMT_STATUS_INVALID_PARAMS;
5361 goto done;
5362 }
5363
5364 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5365 if (len != expected_size) {
5366 status = MGMT_STATUS_INVALID_PARAMS;
5367 goto done;
5368 }
5369
5370 m = kzalloc(sizeof(*m), GFP_KERNEL);
5371 if (!m) {
5372 status = MGMT_STATUS_NO_RESOURCES;
5373 goto done;
5374 }
5375
5376 INIT_LIST_HEAD(&m->patterns);
5377
5378 parse_adv_monitor_rssi(m, &cp->rssi);
5379 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5380
5381 done:
5382 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5383 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5384 }
5385
5386 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5387 void *data, int status)
5388 {
5389 struct mgmt_rp_remove_adv_monitor rp;
5390 struct mgmt_pending_cmd *cmd = data;
5391 struct mgmt_cp_remove_adv_monitor *cp;
5392
5393 if (status == -ECANCELED ||
5394 cmd != pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev))
5395 return;
5396
5397 hci_dev_lock(hdev);
5398
5399 cp = cmd->param;
5400
5401 rp.monitor_handle = cp->monitor_handle;
5402
5403 if (!status)
5404 hci_update_passive_scan(hdev);
5405
5406 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5407 mgmt_status(status), &rp, sizeof(rp));
5408 mgmt_pending_remove(cmd);
5409
5410 hci_dev_unlock(hdev);
5411 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5412 rp.monitor_handle, status);
5413 }
5414
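/* A monitor handle of 0 is special and requests the removal of all
 * registered monitors; any other value removes just that monitor.
 */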
5415 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5416 {
5417 struct mgmt_pending_cmd *cmd = data;
5418
5419 if (cmd != pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev))
5420 return -ECANCELED;
5421
5422 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5423 u16 handle = __le16_to_cpu(cp->monitor_handle);
5424
5425 if (!handle)
5426 return hci_remove_all_adv_monitor(hdev);
5427
5428 return hci_remove_single_adv_monitor(hdev, handle);
5429 }
5430
5431 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5432 void *data, u16 len)
5433 {
5434 struct mgmt_pending_cmd *cmd;
5435 int err, status;
5436
5437 hci_dev_lock(hdev);
5438
5439 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5440 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5441 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5442 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5443 status = MGMT_STATUS_BUSY;
5444 goto unlock;
5445 }
5446
5447 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5448 if (!cmd) {
5449 status = MGMT_STATUS_NO_RESOURCES;
5450 goto unlock;
5451 }
5452
5453 err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
5454 mgmt_remove_adv_monitor_complete);
5455
5456 if (err) {
5457 mgmt_pending_remove(cmd);
5458
5459 if (err == -ENOMEM)
5460 status = MGMT_STATUS_NO_RESOURCES;
5461 else
5462 status = MGMT_STATUS_FAILED;
5463
5464 goto unlock;
5465 }
5466
5467 hci_dev_unlock(hdev);
5468
5469 return 0;
5470
5471 unlock:
5472 hci_dev_unlock(hdev);
5473 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5474 status);
5475 }
5476
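/* The size of the reply depends on whether BR/EDR Secure Connections is
 * enabled: without it only the P-192 hash and randomizer are valid and
 * the P-256 fields are trimmed from the response.
 */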
5477 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5478 {
5479 struct mgmt_rp_read_local_oob_data mgmt_rp;
5480 size_t rp_size = sizeof(mgmt_rp);
5481 struct mgmt_pending_cmd *cmd = data;
5482 struct sk_buff *skb = cmd->skb;
5483 u8 status = mgmt_status(err);
5484
5485 if (!status) {
5486 if (!skb)
5487 status = MGMT_STATUS_FAILED;
5488 else if (IS_ERR(skb))
5489 status = mgmt_status(PTR_ERR(skb));
5490 else
5491 status = mgmt_status(skb->data[0]);
5492 }
5493
5494 bt_dev_dbg(hdev, "status %d", status);
5495
5496 if (status) {
5497 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5498 goto remove;
5499 }
5500
5501 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5502
5503 if (!bredr_sc_enabled(hdev)) {
5504 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5505
5506 if (skb->len < sizeof(*rp)) {
5507 mgmt_cmd_status(cmd->sk, hdev->id,
5508 MGMT_OP_READ_LOCAL_OOB_DATA,
5509 MGMT_STATUS_FAILED);
5510 goto remove;
5511 }
5512
5513 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5514 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5515
5516 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5517 } else {
5518 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5519
5520 if (skb->len < sizeof(*rp)) {
5521 mgmt_cmd_status(cmd->sk, hdev->id,
5522 MGMT_OP_READ_LOCAL_OOB_DATA,
5523 MGMT_STATUS_FAILED);
5524 goto remove;
5525 }
5526
5527 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5528 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5529
5530 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5531 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5532 }
5533
5534 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5535 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5536
5537 remove:
5538 if (skb && !IS_ERR(skb))
5539 kfree_skb(skb);
5540
5541 mgmt_pending_free(cmd);
5542 }
5543
5544 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5545 {
5546 struct mgmt_pending_cmd *cmd = data;
5547
5548 if (bredr_sc_enabled(hdev))
5549 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5550 else
5551 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5552
5553 if (IS_ERR(cmd->skb))
5554 return PTR_ERR(cmd->skb);
5555 else
5556 return 0;
5557 }
5558
5559 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5560 void *data, u16 data_len)
5561 {
5562 struct mgmt_pending_cmd *cmd;
5563 int err;
5564
5565 bt_dev_dbg(hdev, "sock %p", sk);
5566
5567 hci_dev_lock(hdev);
5568
5569 if (!hdev_is_powered(hdev)) {
5570 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5571 MGMT_STATUS_NOT_POWERED);
5572 goto unlock;
5573 }
5574
5575 if (!lmp_ssp_capable(hdev)) {
5576 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5577 MGMT_STATUS_NOT_SUPPORTED);
5578 goto unlock;
5579 }
5580
5581 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5582 if (!cmd)
5583 err = -ENOMEM;
5584 else
5585 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5586 read_local_oob_data_complete);
5587
5588 if (err < 0) {
5589 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5590 MGMT_STATUS_FAILED);
5591
5592 if (cmd)
5593 mgmt_pending_free(cmd);
5594 }
5595
5596 unlock:
5597 hci_dev_unlock(hdev);
5598 return err;
5599 }
5600
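/* Handler for MGMT_OP_ADD_REMOTE_OOB_DATA. Two fixed sizes are
 * accepted: the legacy form carrying only the P-192 values and the
 * extended form carrying both P-192 and P-256 values. A zero-filled
 * hash/randomizer pair disables OOB data for that curve.
 */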
5601 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5602 void *data, u16 len)
5603 {
5604 struct mgmt_addr_info *addr = data;
5605 int err;
5606
5607 bt_dev_dbg(hdev, "sock %p", sk);
5608
5609 if (!bdaddr_type_is_valid(addr->type))
5610 return mgmt_cmd_complete(sk, hdev->id,
5611 MGMT_OP_ADD_REMOTE_OOB_DATA,
5612 MGMT_STATUS_INVALID_PARAMS,
5613 addr, sizeof(*addr));
5614
5615 hci_dev_lock(hdev);
5616
5617 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5618 struct mgmt_cp_add_remote_oob_data *cp = data;
5619 u8 status;
5620
5621 if (cp->addr.type != BDADDR_BREDR) {
5622 err = mgmt_cmd_complete(sk, hdev->id,
5623 MGMT_OP_ADD_REMOTE_OOB_DATA,
5624 MGMT_STATUS_INVALID_PARAMS,
5625 &cp->addr, sizeof(cp->addr));
5626 goto unlock;
5627 }
5628
5629 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5630 cp->addr.type, cp->hash,
5631 cp->rand, NULL, NULL);
5632 if (err < 0)
5633 status = MGMT_STATUS_FAILED;
5634 else
5635 status = MGMT_STATUS_SUCCESS;
5636
5637 err = mgmt_cmd_complete(sk, hdev->id,
5638 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5639 &cp->addr, sizeof(cp->addr));
5640 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5641 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5642 u8 *rand192, *hash192, *rand256, *hash256;
5643 u8 status;
5644
5645 if (bdaddr_type_is_le(cp->addr.type)) {
5646 /* Enforce zero-valued 192-bit parameters as
5647 * long as legacy SMP OOB isn't implemented.
5648 */
5649 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5650 memcmp(cp->hash192, ZERO_KEY, 16)) {
5651 err = mgmt_cmd_complete(sk, hdev->id,
5652 MGMT_OP_ADD_REMOTE_OOB_DATA,
5653 MGMT_STATUS_INVALID_PARAMS,
5654 addr, sizeof(*addr));
5655 goto unlock;
5656 }
5657
5658 rand192 = NULL;
5659 hash192 = NULL;
5660 } else {
5661 /* If one of the P-192 values is set to zero, then just
5662 * disable OOB data for P-192.
5663 */
5664 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5665 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5666 rand192 = NULL;
5667 hash192 = NULL;
5668 } else {
5669 rand192 = cp->rand192;
5670 hash192 = cp->hash192;
5671 }
5672 }
5673
5674 /* If one of the P-256 values is set to zero, then just
5675 * disable OOB data for P-256.
5676 */
5677 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5678 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5679 rand256 = NULL;
5680 hash256 = NULL;
5681 } else {
5682 rand256 = cp->rand256;
5683 hash256 = cp->hash256;
5684 }
5685
5686 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5687 cp->addr.type, hash192, rand192,
5688 hash256, rand256);
5689 if (err < 0)
5690 status = MGMT_STATUS_FAILED;
5691 else
5692 status = MGMT_STATUS_SUCCESS;
5693
5694 err = mgmt_cmd_complete(sk, hdev->id,
5695 MGMT_OP_ADD_REMOTE_OOB_DATA,
5696 status, &cp->addr, sizeof(cp->addr));
5697 } else {
5698 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5699 len);
5700 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5701 MGMT_STATUS_INVALID_PARAMS);
5702 }
5703
5704 unlock:
5705 hci_dev_unlock(hdev);
5706 return err;
5707 }
5708
5709 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5710 void *data, u16 len)
5711 {
5712 struct mgmt_cp_remove_remote_oob_data *cp = data;
5713 u8 status;
5714 int err;
5715
5716 bt_dev_dbg(hdev, "sock %p", sk);
5717
5718 if (cp->addr.type != BDADDR_BREDR)
5719 return mgmt_cmd_complete(sk, hdev->id,
5720 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5721 MGMT_STATUS_INVALID_PARAMS,
5722 &cp->addr, sizeof(cp->addr));
5723
5724 hci_dev_lock(hdev);
5725
5726 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5727 hci_remote_oob_data_clear(hdev);
5728 status = MGMT_STATUS_SUCCESS;
5729 goto done;
5730 }
5731
5732 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5733 if (err < 0)
5734 status = MGMT_STATUS_INVALID_PARAMS;
5735 else
5736 status = MGMT_STATUS_SUCCESS;
5737
5738 done:
5739 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5740 status, &cp->addr, sizeof(cp->addr));
5741
5742 hci_dev_unlock(hdev);
5743 return err;
5744 }
5745
5746 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5747 {
5748 struct mgmt_pending_cmd *cmd;
5749
5750 bt_dev_dbg(hdev, "status %u", status);
5751
5752 hci_dev_lock(hdev);
5753
5754 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5755 if (!cmd)
5756 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5757
5758 if (!cmd)
5759 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5760
5761 if (cmd) {
5762 cmd->cmd_complete(cmd, mgmt_status(status));
5763 mgmt_pending_remove(cmd);
5764 }
5765
5766 hci_dev_unlock(hdev);
5767 }
5768
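/* Check that the requested discovery type is supported by the
 * controller. On failure *mgmt_status is set to the status code the
 * caller should return (e.g. MGMT_STATUS_INVALID_PARAMS for an
 * unknown type).
 */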
5769 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5770 uint8_t *mgmt_status)
5771 {
5772 switch (type) {
5773 case DISCOV_TYPE_LE:
5774 *mgmt_status = mgmt_le_support(hdev);
5775 if (*mgmt_status)
5776 return false;
5777 break;
5778 case DISCOV_TYPE_INTERLEAVED:
5779 *mgmt_status = mgmt_le_support(hdev);
5780 if (*mgmt_status)
5781 return false;
5782 fallthrough;
5783 case DISCOV_TYPE_BREDR:
5784 *mgmt_status = mgmt_bredr_support(hdev);
5785 if (*mgmt_status)
5786 return false;
5787 break;
5788 default:
5789 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5790 return false;
5791 }
5792
5793 return true;
5794 }
5795
5796 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5797 {
5798 struct mgmt_pending_cmd *cmd = data;
5799
5800 bt_dev_dbg(hdev, "err %d", err);
5801
5802 if (err == -ECANCELED)
5803 return;
5804
5805 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5806 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5807 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5808 return;
5809
5810 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5811 cmd->param, 1);
5812 mgmt_pending_remove(cmd);
5813
5814 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
5815 DISCOVERY_FINDING);
5816 }
5817
5818 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5819 {
5820 return hci_start_discovery_sync(hdev);
5821 }
5822
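/* Common handler behind Start Discovery and Start Limited Discovery.
 * The adapter must be powered, no discovery or periodic inquiry may be
 * in progress, discovery must not be paused and the type must pass
 * discovery_type_is_valid().
 *
 * A rough user-space sketch (illustrative only, not part of this
 * file), assuming an HCI control socket for the adapter's index:
 *
 *	struct mgmt_cp_start_discovery cp = {
 *		.type = BIT(BDADDR_BREDR) | BIT(BDADDR_LE_PUBLIC) |
 *			BIT(BDADDR_LE_RANDOM),	// interleaved discovery
 *	};
 *	// sent as MGMT_OP_START_DISCOVERY
 */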
5823 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5824 u16 op, void *data, u16 len)
5825 {
5826 struct mgmt_cp_start_discovery *cp = data;
5827 struct mgmt_pending_cmd *cmd;
5828 u8 status;
5829 int err;
5830
5831 bt_dev_dbg(hdev, "sock %p", sk);
5832
5833 hci_dev_lock(hdev);
5834
5835 if (!hdev_is_powered(hdev)) {
5836 err = mgmt_cmd_complete(sk, hdev->id, op,
5837 MGMT_STATUS_NOT_POWERED,
5838 &cp->type, sizeof(cp->type));
5839 goto failed;
5840 }
5841
5842 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5843 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5844 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5845 &cp->type, sizeof(cp->type));
5846 goto failed;
5847 }
5848
5849 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5850 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5851 &cp->type, sizeof(cp->type));
5852 goto failed;
5853 }
5854
5855 /* Can't start discovery when it is paused */
5856 if (hdev->discovery_paused) {
5857 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5858 &cp->type, sizeof(cp->type));
5859 goto failed;
5860 }
5861
5862 /* Clear the discovery filter first to free any previously
5863 * allocated memory for the UUID list.
5864 */
5865 hci_discovery_filter_clear(hdev);
5866
5867 hdev->discovery.type = cp->type;
5868 hdev->discovery.report_invalid_rssi = false;
5869 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5870 hdev->discovery.limited = true;
5871 else
5872 hdev->discovery.limited = false;
5873
5874 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5875 if (!cmd) {
5876 err = -ENOMEM;
5877 goto failed;
5878 }
5879
5880 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5881 start_discovery_complete);
5882 if (err < 0) {
5883 mgmt_pending_remove(cmd);
5884 goto failed;
5885 }
5886
5887 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5888
5889 failed:
5890 hci_dev_unlock(hdev);
5891 return err;
5892 }
5893
5894 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5895 void *data, u16 len)
5896 {
5897 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5898 data, len);
5899 }
5900
5901 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5902 void *data, u16 len)
5903 {
5904 return start_discovery_internal(sk, hdev,
5905 MGMT_OP_START_LIMITED_DISCOVERY,
5906 data, len);
5907 }
5908
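/* Start Service Discovery adds RSSI and UUID filtering on top of
 * regular discovery. The payload carries uuid_count 128-bit UUIDs, so
 * its total length must be exactly sizeof(*cp) + uuid_count * 16
 * bytes; e.g. a filter with two UUIDs adds 32 bytes to the fixed
 * header.
 */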
5909 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5910 void *data, u16 len)
5911 {
5912 struct mgmt_cp_start_service_discovery *cp = data;
5913 struct mgmt_pending_cmd *cmd;
5914 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5915 u16 uuid_count, expected_len;
5916 u8 status;
5917 int err;
5918
5919 bt_dev_dbg(hdev, "sock %p", sk);
5920
5921 hci_dev_lock(hdev);
5922
5923 if (!hdev_is_powered(hdev)) {
5924 err = mgmt_cmd_complete(sk, hdev->id,
5925 MGMT_OP_START_SERVICE_DISCOVERY,
5926 MGMT_STATUS_NOT_POWERED,
5927 &cp->type, sizeof(cp->type));
5928 goto failed;
5929 }
5930
5931 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5932 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5933 err = mgmt_cmd_complete(sk, hdev->id,
5934 MGMT_OP_START_SERVICE_DISCOVERY,
5935 MGMT_STATUS_BUSY, &cp->type,
5936 sizeof(cp->type));
5937 goto failed;
5938 }
5939
5940 if (hdev->discovery_paused) {
5941 err = mgmt_cmd_complete(sk, hdev->id,
5942 MGMT_OP_START_SERVICE_DISCOVERY,
5943 MGMT_STATUS_BUSY, &cp->type,
5944 sizeof(cp->type));
5945 goto failed;
5946 }
5947
5948 uuid_count = __le16_to_cpu(cp->uuid_count);
5949 if (uuid_count > max_uuid_count) {
5950 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5951 uuid_count);
5952 err = mgmt_cmd_complete(sk, hdev->id,
5953 MGMT_OP_START_SERVICE_DISCOVERY,
5954 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5955 sizeof(cp->type));
5956 goto failed;
5957 }
5958
5959 expected_len = sizeof(*cp) + uuid_count * 16;
5960 if (expected_len != len) {
5961 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5962 expected_len, len);
5963 err = mgmt_cmd_complete(sk, hdev->id,
5964 MGMT_OP_START_SERVICE_DISCOVERY,
5965 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5966 sizeof(cp->type));
5967 goto failed;
5968 }
5969
5970 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5971 err = mgmt_cmd_complete(sk, hdev->id,
5972 MGMT_OP_START_SERVICE_DISCOVERY,
5973 status, &cp->type, sizeof(cp->type));
5974 goto failed;
5975 }
5976
5977 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
5978 hdev, data, len);
5979 if (!cmd) {
5980 err = -ENOMEM;
5981 goto failed;
5982 }
5983
5984 /* Clear the discovery filter first to free any previously
5985 * allocated memory for the UUID list.
5986 */
5987 hci_discovery_filter_clear(hdev);
5988
5989 hdev->discovery.result_filtering = true;
5990 hdev->discovery.type = cp->type;
5991 hdev->discovery.rssi = cp->rssi;
5992 hdev->discovery.uuid_count = uuid_count;
5993
5994 if (uuid_count > 0) {
5995 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
5996 GFP_KERNEL);
5997 if (!hdev->discovery.uuids) {
5998 err = mgmt_cmd_complete(sk, hdev->id,
5999 MGMT_OP_START_SERVICE_DISCOVERY,
6000 MGMT_STATUS_FAILED,
6001 &cp->type, sizeof(cp->type));
6002 mgmt_pending_remove(cmd);
6003 goto failed;
6004 }
6005 }
6006
6007 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6008 start_discovery_complete);
6009 if (err < 0) {
6010 mgmt_pending_remove(cmd);
6011 goto failed;
6012 }
6013
6014 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6015
6016 failed:
6017 hci_dev_unlock(hdev);
6018 return err;
6019 }
6020
6021 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6022 {
6023 struct mgmt_pending_cmd *cmd;
6024
6025 bt_dev_dbg(hdev, "status %u", status);
6026
6027 hci_dev_lock(hdev);
6028
6029 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6030 if (cmd) {
6031 cmd->cmd_complete(cmd, mgmt_status(status));
6032 mgmt_pending_remove(cmd);
6033 }
6034
6035 hci_dev_unlock(hdev);
6036 }
6037
6038 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6039 {
6040 struct mgmt_pending_cmd *cmd = data;
6041
6042 if (err == -ECANCELED ||
6043 cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6044 return;
6045
6046 bt_dev_dbg(hdev, "err %d", err);
6047
6048 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6049 cmd->param, 1);
6050 mgmt_pending_remove(cmd);
6051
6052 if (!err)
6053 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6054 }
6055
6056 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6057 {
6058 return hci_stop_discovery_sync(hdev);
6059 }
6060
6061 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6062 u16 len)
6063 {
6064 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6065 struct mgmt_pending_cmd *cmd;
6066 int err;
6067
6068 bt_dev_dbg(hdev, "sock %p", sk);
6069
6070 hci_dev_lock(hdev);
6071
6072 if (!hci_discovery_active(hdev)) {
6073 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6074 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6075 sizeof(mgmt_cp->type));
6076 goto unlock;
6077 }
6078
6079 if (hdev->discovery.type != mgmt_cp->type) {
6080 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6081 MGMT_STATUS_INVALID_PARAMS,
6082 &mgmt_cp->type, sizeof(mgmt_cp->type));
6083 goto unlock;
6084 }
6085
6086 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6087 if (!cmd) {
6088 err = -ENOMEM;
6089 goto unlock;
6090 }
6091
6092 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6093 stop_discovery_complete);
6094 if (err < 0) {
6095 mgmt_pending_remove(cmd);
6096 goto unlock;
6097 }
6098
6099 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6100
6101 unlock:
6102 hci_dev_unlock(hdev);
6103 return err;
6104 }
6105
6106 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6107 u16 len)
6108 {
6109 struct mgmt_cp_confirm_name *cp = data;
6110 struct inquiry_entry *e;
6111 int err;
6112
6113 bt_dev_dbg(hdev, "sock %p", sk);
6114
6115 hci_dev_lock(hdev);
6116
6117 if (!hci_discovery_active(hdev)) {
6118 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6119 MGMT_STATUS_FAILED, &cp->addr,
6120 sizeof(cp->addr));
6121 goto failed;
6122 }
6123
6124 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6125 if (!e) {
6126 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6127 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6128 sizeof(cp->addr));
6129 goto failed;
6130 }
6131
6132 if (cp->name_known) {
6133 e->name_state = NAME_KNOWN;
6134 list_del(&e->list);
6135 } else {
6136 e->name_state = NAME_NEEDED;
6137 hci_inquiry_cache_update_resolve(hdev, e);
6138 }
6139
6140 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6141 &cp->addr, sizeof(cp->addr));
6142
6143 failed:
6144 hci_dev_unlock(hdev);
6145 return err;
6146 }
6147
6148 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6149 u16 len)
6150 {
6151 struct mgmt_cp_block_device *cp = data;
6152 u8 status;
6153 int err;
6154
6155 bt_dev_dbg(hdev, "sock %p", sk);
6156
6157 if (!bdaddr_type_is_valid(cp->addr.type))
6158 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6159 MGMT_STATUS_INVALID_PARAMS,
6160 &cp->addr, sizeof(cp->addr));
6161
6162 hci_dev_lock(hdev);
6163
6164 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6165 cp->addr.type);
6166 if (err < 0) {
6167 status = MGMT_STATUS_FAILED;
6168 goto done;
6169 }
6170
6171 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6172 sk);
6173 status = MGMT_STATUS_SUCCESS;
6174
6175 done:
6176 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6177 &cp->addr, sizeof(cp->addr));
6178
6179 hci_dev_unlock(hdev);
6180
6181 return err;
6182 }
6183
6184 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6185 u16 len)
6186 {
6187 struct mgmt_cp_unblock_device *cp = data;
6188 u8 status;
6189 int err;
6190
6191 bt_dev_dbg(hdev, "sock %p", sk);
6192
6193 if (!bdaddr_type_is_valid(cp->addr.type))
6194 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6195 MGMT_STATUS_INVALID_PARAMS,
6196 &cp->addr, sizeof(cp->addr));
6197
6198 hci_dev_lock(hdev);
6199
6200 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6201 cp->addr.type);
6202 if (err < 0) {
6203 status = MGMT_STATUS_INVALID_PARAMS;
6204 goto done;
6205 }
6206
6207 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6208 sk);
6209 status = MGMT_STATUS_SUCCESS;
6210
6211 done:
6212 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6213 &cp->addr, sizeof(cp->addr));
6214
6215 hci_dev_unlock(hdev);
6216
6217 return err;
6218 }
6219
6220 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6221 {
6222 return hci_update_eir_sync(hdev);
6223 }
6224
6225 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6226 u16 len)
6227 {
6228 struct mgmt_cp_set_device_id *cp = data;
6229 int err;
6230 __u16 source;
6231
6232 bt_dev_dbg(hdev, "sock %p", sk);
6233
6234 source = __le16_to_cpu(cp->source);
6235
6236 if (source > 0x0002)
6237 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6238 MGMT_STATUS_INVALID_PARAMS);
6239
6240 hci_dev_lock(hdev);
6241
6242 hdev->devid_source = source;
6243 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6244 hdev->devid_product = __le16_to_cpu(cp->product);
6245 hdev->devid_version = __le16_to_cpu(cp->version);
6246
6247 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6248 NULL, 0);
6249
6250 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6251
6252 hci_dev_unlock(hdev);
6253
6254 return err;
6255 }
6256
6257 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6258 {
6259 if (err)
6260 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6261 else
6262 bt_dev_dbg(hdev, "status %d", err);
6263 }
6264
6265 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6266 {
6267 struct cmd_lookup match = { NULL, hdev };
6268 u8 instance;
6269 struct adv_info *adv_instance;
6270 u8 status = mgmt_status(err);
6271
6272 if (status) {
6273 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6274 cmd_status_rsp, &status);
6275 return;
6276 }
6277
6278 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6279 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6280 else
6281 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6282
6283 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6284 &match);
6285
6286 new_settings(hdev, match.sk);
6287
6288 if (match.sk)
6289 sock_put(match.sk);
6290
6291 /* If "Set Advertising" was just disabled and instance advertising was
6292 * set up earlier, then re-enable multi-instance advertising.
6293 */
6294 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6295 list_empty(&hdev->adv_instances))
6296 return;
6297
6298 instance = hdev->cur_adv_instance;
6299 if (!instance) {
6300 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6301 struct adv_info, list);
6302 if (!adv_instance)
6303 return;
6304
6305 instance = adv_instance->instance;
6306 }
6307
6308 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6309
6310 enable_advertising_instance(hdev, err);
6311 }
6312
6313 static int set_adv_sync(struct hci_dev *hdev, void *data)
6314 {
6315 struct mgmt_pending_cmd *cmd = data;
6316 struct mgmt_mode *cp = cmd->param;
6317 u8 val = !!cp->val;
6318
6319 if (cp->val == 0x02)
6320 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6321 else
6322 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6323
6324 cancel_adv_timeout(hdev);
6325
6326 if (val) {
6327 /* Switch to instance "0" for the Set Advertising setting.
6328 * We cannot use update_[adv|scan_rsp]_data() here as the
6329 * HCI_ADVERTISING flag is not yet set.
6330 */
6331 hdev->cur_adv_instance = 0x00;
6332
6333 if (ext_adv_capable(hdev)) {
6334 hci_start_ext_adv_sync(hdev, 0x00);
6335 } else {
6336 hci_update_adv_data_sync(hdev, 0x00);
6337 hci_update_scan_rsp_data_sync(hdev, 0x00);
6338 hci_enable_advertising_sync(hdev);
6339 }
6340 } else {
6341 hci_disable_advertising_sync(hdev);
6342 }
6343
6344 return 0;
6345 }
6346
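/* Set Advertising values: 0x00 disables advertising, 0x01 enables it
 * and 0x02 enables it in connectable mode (tracked via the
 * HCI_ADVERTISING_CONNECTABLE flag).
 */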
6347 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6348 u16 len)
6349 {
6350 struct mgmt_mode *cp = data;
6351 struct mgmt_pending_cmd *cmd;
6352 u8 val, status;
6353 int err;
6354
6355 bt_dev_dbg(hdev, "sock %p", sk);
6356
6357 status = mgmt_le_support(hdev);
6358 if (status)
6359 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6360 status);
6361
6362 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6363 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6364 MGMT_STATUS_INVALID_PARAMS);
6365
6366 if (hdev->advertising_paused)
6367 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6368 MGMT_STATUS_BUSY);
6369
6370 hci_dev_lock(hdev);
6371
6372 val = !!cp->val;
6373
6374 /* The following conditions mean that we should not do any
6375 * HCI communication but directly send a mgmt response to
6376 * user space (after toggling the flag if necessary).
6377 */
6379 if (!hdev_is_powered(hdev) ||
6380 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6381 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6382 hci_dev_test_flag(hdev, HCI_MESH) ||
6383 hci_conn_num(hdev, LE_LINK) > 0 ||
6384 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6385 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6386 bool changed;
6387
6388 if (cp->val) {
6389 hdev->cur_adv_instance = 0x00;
6390 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6391 if (cp->val == 0x02)
6392 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6393 else
6394 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6395 } else {
6396 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6397 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6398 }
6399
6400 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6401 if (err < 0)
6402 goto unlock;
6403
6404 if (changed)
6405 err = new_settings(hdev, sk);
6406
6407 goto unlock;
6408 }
6409
6410 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6411 pending_find(MGMT_OP_SET_LE, hdev)) {
6412 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6413 MGMT_STATUS_BUSY);
6414 goto unlock;
6415 }
6416
6417 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6418 if (!cmd)
6419 err = -ENOMEM;
6420 else
6421 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6422 set_advertising_complete);
6423
6424 if (err < 0 && cmd)
6425 mgmt_pending_remove(cmd);
6426
6427 unlock:
6428 hci_dev_unlock(hdev);
6429 return err;
6430 }
6431
6432 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6433 void *data, u16 len)
6434 {
6435 struct mgmt_cp_set_static_address *cp = data;
6436 int err;
6437
6438 bt_dev_dbg(hdev, "sock %p", sk);
6439
6440 if (!lmp_le_capable(hdev))
6441 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6442 MGMT_STATUS_NOT_SUPPORTED);
6443
6444 if (hdev_is_powered(hdev))
6445 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6446 MGMT_STATUS_REJECTED);
6447
6448 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6449 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6450 return mgmt_cmd_status(sk, hdev->id,
6451 MGMT_OP_SET_STATIC_ADDRESS,
6452 MGMT_STATUS_INVALID_PARAMS);
6453
6454 /* Two most significant bits shall be set */
6455 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6456 return mgmt_cmd_status(sk, hdev->id,
6457 MGMT_OP_SET_STATIC_ADDRESS,
6458 MGMT_STATUS_INVALID_PARAMS);
6459 }
6460
6461 hci_dev_lock(hdev);
6462
6463 bacpy(&hdev->static_addr, &cp->bdaddr);
6464
6465 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6466 if (err < 0)
6467 goto unlock;
6468
6469 err = new_settings(hdev, sk);
6470
6471 unlock:
6472 hci_dev_unlock(hdev);
6473 return err;
6474 }
6475
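/* Scan interval and window are in 0.625 ms units as defined for the
 * LE scan parameters, so the allowed range 0x0004-0x4000 corresponds
 * to 2.5 ms - 10.24 s, and the window must not exceed the interval.
 * A rough conversion sketch (illustrative only):
 *
 *	u16 units = DIV_ROUND_UP(ms * 1000, 625);	// ms -> 0.625 ms units
 */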
6476 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6477 void *data, u16 len)
6478 {
6479 struct mgmt_cp_set_scan_params *cp = data;
6480 __u16 interval, window;
6481 int err;
6482
6483 bt_dev_dbg(hdev, "sock %p", sk);
6484
6485 if (!lmp_le_capable(hdev))
6486 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6487 MGMT_STATUS_NOT_SUPPORTED);
6488
6489 interval = __le16_to_cpu(cp->interval);
6490
6491 if (interval < 0x0004 || interval > 0x4000)
6492 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6493 MGMT_STATUS_INVALID_PARAMS);
6494
6495 window = __le16_to_cpu(cp->window);
6496
6497 if (window < 0x0004 || window > 0x4000)
6498 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6499 MGMT_STATUS_INVALID_PARAMS);
6500
6501 if (window > interval)
6502 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6503 MGMT_STATUS_INVALID_PARAMS);
6504
6505 hci_dev_lock(hdev);
6506
6507 hdev->le_scan_interval = interval;
6508 hdev->le_scan_window = window;
6509
6510 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6511 NULL, 0);
6512
6513 /* If background scan is running, restart it so new parameters are
6514 * loaded.
6515 */
6516 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6517 hdev->discovery.state == DISCOVERY_STOPPED)
6518 hci_update_passive_scan(hdev);
6519
6520 hci_dev_unlock(hdev);
6521
6522 return err;
6523 }
6524
6525 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6526 {
6527 struct mgmt_pending_cmd *cmd = data;
6528
6529 bt_dev_dbg(hdev, "err %d", err);
6530
6531 if (err) {
6532 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6533 mgmt_status(err));
6534 } else {
6535 struct mgmt_mode *cp = cmd->param;
6536
6537 if (cp->val)
6538 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6539 else
6540 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6541
6542 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6543 new_settings(hdev, cmd->sk);
6544 }
6545
6546 mgmt_pending_free(cmd);
6547 }
6548
6549 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6550 {
6551 struct mgmt_pending_cmd *cmd = data;
6552 struct mgmt_mode *cp = cmd->param;
6553
6554 return hci_write_fast_connectable_sync(hdev, cp->val);
6555 }
6556
6557 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6558 void *data, u16 len)
6559 {
6560 struct mgmt_mode *cp = data;
6561 struct mgmt_pending_cmd *cmd;
6562 int err;
6563
6564 bt_dev_dbg(hdev, "sock %p", sk);
6565
6566 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6567 hdev->hci_ver < BLUETOOTH_VER_1_2)
6568 return mgmt_cmd_status(sk, hdev->id,
6569 MGMT_OP_SET_FAST_CONNECTABLE,
6570 MGMT_STATUS_NOT_SUPPORTED);
6571
6572 if (cp->val != 0x00 && cp->val != 0x01)
6573 return mgmt_cmd_status(sk, hdev->id,
6574 MGMT_OP_SET_FAST_CONNECTABLE,
6575 MGMT_STATUS_INVALID_PARAMS);
6576
6577 hci_dev_lock(hdev);
6578
6579 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6580 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6581 goto unlock;
6582 }
6583
6584 if (!hdev_is_powered(hdev)) {
6585 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6586 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6587 new_settings(hdev, sk);
6588 goto unlock;
6589 }
6590
6591 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6592 len);
6593 if (!cmd)
6594 err = -ENOMEM;
6595 else
6596 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6597 fast_connectable_complete);
6598
6599 if (err < 0) {
6600 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6601 MGMT_STATUS_FAILED);
6602
6603 if (cmd)
6604 mgmt_pending_free(cmd);
6605 }
6606
6607 unlock:
6608 hci_dev_unlock(hdev);
6609
6610 return err;
6611 }
6612
6613 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6614 {
6615 struct mgmt_pending_cmd *cmd = data;
6616
6617 bt_dev_dbg(hdev, "err %d", err);
6618
6619 if (err) {
6620 u8 mgmt_err = mgmt_status(err);
6621
6622 /* We need to restore the flag if related HCI commands
6623 * failed.
6624 */
6625 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6626
6627 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6628 } else {
6629 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6630 new_settings(hdev, cmd->sk);
6631 }
6632
6633 mgmt_pending_free(cmd);
6634 }
6635
6636 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6637 {
6638 int status;
6639
6640 status = hci_write_fast_connectable_sync(hdev, false);
6641
6642 if (!status)
6643 status = hci_update_scan_sync(hdev);
6644
6645 /* Since only the advertising data flags will change, there
6646 * is no need to update the scan response data.
6647 */
6648 if (!status)
6649 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6650
6651 return status;
6652 }
6653
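/* Toggle BR/EDR support on a dual-mode controller. Disabling while
 * powered is rejected, and so is re-enabling when the controller
 * operates LE-only with a static address or with Secure Connections
 * enabled (see the detailed comment below).
 */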
6654 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6655 {
6656 struct mgmt_mode *cp = data;
6657 struct mgmt_pending_cmd *cmd;
6658 int err;
6659
6660 bt_dev_dbg(hdev, "sock %p", sk);
6661
6662 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6663 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6664 MGMT_STATUS_NOT_SUPPORTED);
6665
6666 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6667 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6668 MGMT_STATUS_REJECTED);
6669
6670 if (cp->val != 0x00 && cp->val != 0x01)
6671 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6672 MGMT_STATUS_INVALID_PARAMS);
6673
6674 hci_dev_lock(hdev);
6675
6676 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6677 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6678 goto unlock;
6679 }
6680
6681 if (!hdev_is_powered(hdev)) {
6682 if (!cp->val) {
6683 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6684 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6685 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6686 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6687 }
6688
6689 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6690
6691 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6692 if (err < 0)
6693 goto unlock;
6694
6695 err = new_settings(hdev, sk);
6696 goto unlock;
6697 }
6698
6699 /* Reject disabling when powered on */
6700 if (!cp->val) {
6701 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6702 MGMT_STATUS_REJECTED);
6703 goto unlock;
6704 } else {
6705 /* When a dual-mode controller is configured to operate
6706 * with LE only and uses a static address, switching
6707 * BR/EDR back on is not allowed.
6708 *
6709 * Dual-mode controllers shall operate with the public
6710 * address as their identity address for BR/EDR and LE. So
6711 * reject the attempt to create an invalid configuration.
6712 *
6713 * The same restrictions apply when Secure Connections
6714 * has been enabled. For BR/EDR this is a controller feature
6715 * while for LE it is a host stack feature. This means that
6716 * switching BR/EDR back on when Secure Connections has been
6717 * enabled is not a supported transaction.
6718 */
6719 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6720 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6721 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6722 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6723 MGMT_STATUS_REJECTED);
6724 goto unlock;
6725 }
6726 }
6727
6728 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6729 if (!cmd)
6730 err = -ENOMEM;
6731 else
6732 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6733 set_bredr_complete);
6734
6735 if (err < 0) {
6736 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6737 MGMT_STATUS_FAILED);
6738 if (cmd)
6739 mgmt_pending_free(cmd);
6740
6741 goto unlock;
6742 }
6743
6744 /* We need to flip the bit already here so that
6745 * hci_req_update_adv_data generates the correct flags.
6746 */
6747 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6748
6749 unlock:
6750 hci_dev_unlock(hdev);
6751 return err;
6752 }
6753
6754 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6755 {
6756 struct mgmt_pending_cmd *cmd = data;
6757 struct mgmt_mode *cp;
6758
6759 bt_dev_dbg(hdev, "err %d", err);
6760
6761 if (err) {
6762 u8 mgmt_err = mgmt_status(err);
6763
6764 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6765 goto done;
6766 }
6767
6768 cp = cmd->param;
6769
6770 switch (cp->val) {
6771 case 0x00:
6772 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6773 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6774 break;
6775 case 0x01:
6776 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6777 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6778 break;
6779 case 0x02:
6780 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6781 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6782 break;
6783 }
6784
6785 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6786 new_settings(hdev, cmd->sk);
6787
6788 done:
6789 mgmt_pending_free(cmd);
6790 }
6791
6792 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6793 {
6794 struct mgmt_pending_cmd *cmd = data;
6795 struct mgmt_mode *cp = cmd->param;
6796 u8 val = !!cp->val;
6797
6798 /* Force write of val */
6799 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6800
6801 return hci_write_sc_support_sync(hdev, val);
6802 }
6803
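/* Secure Connections mode values: 0x00 disables the feature, 0x01
 * enables it and 0x02 additionally enforces SC Only mode
 * (HCI_SC_ONLY).
 */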
6804 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6805 void *data, u16 len)
6806 {
6807 struct mgmt_mode *cp = data;
6808 struct mgmt_pending_cmd *cmd;
6809 u8 val;
6810 int err;
6811
6812 bt_dev_dbg(hdev, "sock %p", sk);
6813
6814 if (!lmp_sc_capable(hdev) &&
6815 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6816 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6817 MGMT_STATUS_NOT_SUPPORTED);
6818
6819 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6820 lmp_sc_capable(hdev) &&
6821 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6822 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6823 MGMT_STATUS_REJECTED);
6824
6825 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6826 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6827 MGMT_STATUS_INVALID_PARAMS);
6828
6829 hci_dev_lock(hdev);
6830
6831 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6832 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6833 bool changed;
6834
6835 if (cp->val) {
6836 changed = !hci_dev_test_and_set_flag(hdev,
6837 HCI_SC_ENABLED);
6838 if (cp->val == 0x02)
6839 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6840 else
6841 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6842 } else {
6843 changed = hci_dev_test_and_clear_flag(hdev,
6844 HCI_SC_ENABLED);
6845 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6846 }
6847
6848 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6849 if (err < 0)
6850 goto failed;
6851
6852 if (changed)
6853 err = new_settings(hdev, sk);
6854
6855 goto failed;
6856 }
6857
6858 val = !!cp->val;
6859
6860 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6861 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6862 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6863 goto failed;
6864 }
6865
6866 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6867 if (!cmd)
6868 err = -ENOMEM;
6869 else
6870 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6871 set_secure_conn_complete);
6872
6873 if (err < 0) {
6874 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6875 MGMT_STATUS_FAILED);
6876 if (cmd)
6877 mgmt_pending_free(cmd);
6878 }
6879
6880 failed:
6881 hci_dev_unlock(hdev);
6882 return err;
6883 }
6884
6885 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6886 void *data, u16 len)
6887 {
6888 struct mgmt_mode *cp = data;
6889 bool changed, use_changed;
6890 int err;
6891
6892 bt_dev_dbg(hdev, "sock %p", sk);
6893
6894 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6895 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6896 MGMT_STATUS_INVALID_PARAMS);
6897
6898 hci_dev_lock(hdev);
6899
6900 if (cp->val)
6901 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6902 else
6903 changed = hci_dev_test_and_clear_flag(hdev,
6904 HCI_KEEP_DEBUG_KEYS);
6905
6906 if (cp->val == 0x02)
6907 use_changed = !hci_dev_test_and_set_flag(hdev,
6908 HCI_USE_DEBUG_KEYS);
6909 else
6910 use_changed = hci_dev_test_and_clear_flag(hdev,
6911 HCI_USE_DEBUG_KEYS);
6912
6913 if (hdev_is_powered(hdev) && use_changed &&
6914 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6915 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6916 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6917 sizeof(mode), &mode);
6918 }
6919
6920 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6921 if (err < 0)
6922 goto unlock;
6923
6924 if (changed)
6925 err = new_settings(hdev, sk);
6926
6927 unlock:
6928 hci_dev_unlock(hdev);
6929 return err;
6930 }
6931
6932 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6933 u16 len)
6934 {
6935 struct mgmt_cp_set_privacy *cp = cp_data;
6936 bool changed;
6937 int err;
6938
6939 bt_dev_dbg(hdev, "sock %p", sk);
6940
6941 if (!lmp_le_capable(hdev))
6942 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6943 MGMT_STATUS_NOT_SUPPORTED);
6944
6945 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6946 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6947 MGMT_STATUS_INVALID_PARAMS);
6948
6949 if (hdev_is_powered(hdev))
6950 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6951 MGMT_STATUS_REJECTED);
6952
6953 hci_dev_lock(hdev);
6954
6955 /* If user space supports this command it is also expected to
6956 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6957 */
6958 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6959
6960 if (cp->privacy) {
6961 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6962 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
6963 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6964 hci_adv_instances_set_rpa_expired(hdev, true);
6965 if (cp->privacy == 0x02)
6966 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6967 else
6968 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6969 } else {
6970 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6971 memset(hdev->irk, 0, sizeof(hdev->irk));
6972 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6973 hci_adv_instances_set_rpa_expired(hdev, false);
6974 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6975 }
6976
6977 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
6978 if (err < 0)
6979 goto unlock;
6980
6981 if (changed)
6982 err = new_settings(hdev, sk);
6983
6984 unlock:
6985 hci_dev_unlock(hdev);
6986 return err;
6987 }
6988
6989 static bool irk_is_valid(struct mgmt_irk_info *irk)
6990 {
6991 switch (irk->addr.type) {
6992 case BDADDR_LE_PUBLIC:
6993 return true;
6994
6995 case BDADDR_LE_RANDOM:
6996 /* Two most significant bits shall be set */
6997 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6998 return false;
6999 return true;
7000 }
7001
7002 return false;
7003 }
7004
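/* Load IRKs replaces the whole set of stored IRKs: the existing list
 * is cleared, entries whose key value is blocked are skipped, and the
 * HCI_RPA_RESOLVING flag is set since user space that issues this
 * command is expected to handle RPAs.
 */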
7005 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7006 u16 len)
7007 {
7008 struct mgmt_cp_load_irks *cp = cp_data;
7009 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7010 sizeof(struct mgmt_irk_info));
7011 u16 irk_count, expected_len;
7012 int i, err;
7013
7014 bt_dev_dbg(hdev, "sock %p", sk);
7015
7016 if (!lmp_le_capable(hdev))
7017 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7018 MGMT_STATUS_NOT_SUPPORTED);
7019
7020 irk_count = __le16_to_cpu(cp->irk_count);
7021 if (irk_count > max_irk_count) {
7022 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7023 irk_count);
7024 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7025 MGMT_STATUS_INVALID_PARAMS);
7026 }
7027
7028 expected_len = struct_size(cp, irks, irk_count);
7029 if (expected_len != len) {
7030 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7031 expected_len, len);
7032 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7033 MGMT_STATUS_INVALID_PARAMS);
7034 }
7035
7036 bt_dev_dbg(hdev, "irk_count %u", irk_count);
7037
7038 for (i = 0; i < irk_count; i++) {
7039 struct mgmt_irk_info *key = &cp->irks[i];
7040
7041 if (!irk_is_valid(key))
7042 return mgmt_cmd_status(sk, hdev->id,
7043 MGMT_OP_LOAD_IRKS,
7044 MGMT_STATUS_INVALID_PARAMS);
7045 }
7046
7047 hci_dev_lock(hdev);
7048
7049 hci_smp_irks_clear(hdev);
7050
7051 for (i = 0; i < irk_count; i++) {
7052 struct mgmt_irk_info *irk = &cp->irks[i];
7053
7054 if (hci_is_blocked_key(hdev,
7055 HCI_BLOCKED_KEY_TYPE_IRK,
7056 irk->val)) {
7057 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7058 &irk->addr.bdaddr);
7059 continue;
7060 }
7061
7062 hci_add_irk(hdev, &irk->addr.bdaddr,
7063 le_addr_type(irk->addr.type), irk->val,
7064 BDADDR_ANY);
7065 }
7066
7067 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7068
7069 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7070
7071 hci_dev_unlock(hdev);
7072
7073 return err;
7074 }
7075
7076 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7077 {
7078 if (key->initiator != 0x00 && key->initiator != 0x01)
7079 return false;
7080
7081 switch (key->addr.type) {
7082 case BDADDR_LE_PUBLIC:
7083 return true;
7084
7085 case BDADDR_LE_RANDOM:
7086 /* Two most significant bits shall be set */
7087 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7088 return false;
7089 return true;
7090 }
7091
7092 return false;
7093 }
7094
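/* Load Long Term Keys replaces all stored LTKs. Each entry maps to an
 * SMP key type as follows:
 *   MGMT_LTK_UNAUTHENTICATED/AUTHENTICATED -> SMP_LTK or
 *     SMP_LTK_RESPONDER, depending on key->initiator
 *   MGMT_LTK_P256_UNAUTH/P256_AUTH         -> SMP_LTK_P256
 *   MGMT_LTK_P256_DEBUG and unknown types  -> skipped
 * Blocked and invalid keys are skipped as well.
 */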
7095 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7096 void *cp_data, u16 len)
7097 {
7098 struct mgmt_cp_load_long_term_keys *cp = cp_data;
7099 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7100 sizeof(struct mgmt_ltk_info));
7101 u16 key_count, expected_len;
7102 int i, err;
7103
7104 bt_dev_dbg(hdev, "sock %p", sk);
7105
7106 if (!lmp_le_capable(hdev))
7107 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7108 MGMT_STATUS_NOT_SUPPORTED);
7109
7110 key_count = __le16_to_cpu(cp->key_count);
7111 if (key_count > max_key_count) {
7112 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7113 key_count);
7114 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7115 MGMT_STATUS_INVALID_PARAMS);
7116 }
7117
7118 expected_len = struct_size(cp, keys, key_count);
7119 if (expected_len != len) {
7120 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7121 expected_len, len);
7122 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7123 MGMT_STATUS_INVALID_PARAMS);
7124 }
7125
7126 bt_dev_dbg(hdev, "key_count %u", key_count);
7127
7128 hci_dev_lock(hdev);
7129
7130 hci_smp_ltks_clear(hdev);
7131
7132 for (i = 0; i < key_count; i++) {
7133 struct mgmt_ltk_info *key = &cp->keys[i];
7134 u8 type, authenticated;
7135
7136 if (hci_is_blocked_key(hdev,
7137 HCI_BLOCKED_KEY_TYPE_LTK,
7138 key->val)) {
7139 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7140 &key->addr.bdaddr);
7141 continue;
7142 }
7143
7144 if (!ltk_is_valid(key)) {
7145 bt_dev_warn(hdev, "Invalid LTK for %pMR",
7146 &key->addr.bdaddr);
7147 continue;
7148 }
7149
7150 switch (key->type) {
7151 case MGMT_LTK_UNAUTHENTICATED:
7152 authenticated = 0x00;
7153 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7154 break;
7155 case MGMT_LTK_AUTHENTICATED:
7156 authenticated = 0x01;
7157 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7158 break;
7159 case MGMT_LTK_P256_UNAUTH:
7160 authenticated = 0x00;
7161 type = SMP_LTK_P256;
7162 break;
7163 case MGMT_LTK_P256_AUTH:
7164 authenticated = 0x01;
7165 type = SMP_LTK_P256;
7166 break;
7167 case MGMT_LTK_P256_DEBUG:
7168 authenticated = 0x00;
7169 type = SMP_LTK_P256_DEBUG;
7170 fallthrough;
7171 default:
7172 continue;
7173 }
7174
7175 hci_add_ltk(hdev, &key->addr.bdaddr,
7176 le_addr_type(key->addr.type), type, authenticated,
7177 key->val, key->enc_size, key->ediv, key->rand);
7178 }
7179
7180 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7181 NULL, 0);
7182
7183 hci_dev_unlock(hdev);
7184
7185 return err;
7186 }
7187
7188 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7189 {
7190 struct mgmt_pending_cmd *cmd = data;
7191 struct hci_conn *conn = cmd->user_data;
7192 struct mgmt_cp_get_conn_info *cp = cmd->param;
7193 struct mgmt_rp_get_conn_info rp;
7194 u8 status;
7195
7196 bt_dev_dbg(hdev, "err %d", err);
7197
7198 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7199
7200 status = mgmt_status(err);
7201 if (status == MGMT_STATUS_SUCCESS) {
7202 rp.rssi = conn->rssi;
7203 rp.tx_power = conn->tx_power;
7204 rp.max_tx_power = conn->max_tx_power;
7205 } else {
7206 rp.rssi = HCI_RSSI_INVALID;
7207 rp.tx_power = HCI_TX_POWER_INVALID;
7208 rp.max_tx_power = HCI_TX_POWER_INVALID;
7209 }
7210
7211 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7212 &rp, sizeof(rp));
7213
7214 mgmt_pending_free(cmd);
7215 }
7216
7217 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7218 {
7219 struct mgmt_pending_cmd *cmd = data;
7220 struct mgmt_cp_get_conn_info *cp = cmd->param;
7221 struct hci_conn *conn;
7222 int err;
7223 __le16 handle;
7224
7225 /* Make sure we are still connected */
7226 if (cp->addr.type == BDADDR_BREDR)
7227 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7228 &cp->addr.bdaddr);
7229 else
7230 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7231
7232 if (!conn || conn->state != BT_CONNECTED)
7233 return MGMT_STATUS_NOT_CONNECTED;
7234
7235 cmd->user_data = conn;
7236 handle = cpu_to_le16(conn->handle);
7237
7238 /* Refresh RSSI each time */
7239 err = hci_read_rssi_sync(hdev, handle);
7240
7241 /* For LE links the TX power does not change, thus we don't need
7242 * to query for it again once the value is known.
7243 */
7244 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7245 conn->tx_power == HCI_TX_POWER_INVALID))
7246 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7247
7248 /* Max TX power needs to be read only once per connection */
7249 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7250 err = hci_read_tx_power_sync(hdev, handle, 0x01);
7251
7252 return err;
7253 }
7254
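/* RSSI and TX power are cached in the hci_conn and only refreshed
 * from the controller once they are older than a randomized age
 * between hdev->conn_info_min_age and hdev->conn_info_max_age;
 * otherwise the cached values are returned directly.
 */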
7255 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7256 u16 len)
7257 {
7258 struct mgmt_cp_get_conn_info *cp = data;
7259 struct mgmt_rp_get_conn_info rp;
7260 struct hci_conn *conn;
7261 unsigned long conn_info_age;
7262 int err = 0;
7263
7264 bt_dev_dbg(hdev, "sock %p", sk);
7265
7266 memset(&rp, 0, sizeof(rp));
7267 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7268 rp.addr.type = cp->addr.type;
7269
7270 if (!bdaddr_type_is_valid(cp->addr.type))
7271 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7272 MGMT_STATUS_INVALID_PARAMS,
7273 &rp, sizeof(rp));
7274
7275 hci_dev_lock(hdev);
7276
7277 if (!hdev_is_powered(hdev)) {
7278 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7279 MGMT_STATUS_NOT_POWERED, &rp,
7280 sizeof(rp));
7281 goto unlock;
7282 }
7283
7284 if (cp->addr.type == BDADDR_BREDR)
7285 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7286 &cp->addr.bdaddr);
7287 else
7288 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7289
7290 if (!conn || conn->state != BT_CONNECTED) {
7291 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7292 MGMT_STATUS_NOT_CONNECTED, &rp,
7293 sizeof(rp));
7294 goto unlock;
7295 }
7296
7297 /* To keep clients from guessing when to poll again, we calculate
7298 * the conn info age as a random value between the min/max set in hdev.
7299 */
7300 conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
7301 hdev->conn_info_max_age - 1);
7302
7303 /* Query the controller to refresh the cached values if they are
7304 * too old or were never read.
7305 */
7306 if (time_after(jiffies, conn->conn_info_timestamp +
7307 msecs_to_jiffies(conn_info_age)) ||
7308 !conn->conn_info_timestamp) {
7309 struct mgmt_pending_cmd *cmd;
7310
7311 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7312 len);
7313 if (!cmd) {
7314 err = -ENOMEM;
7315 } else {
7316 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7317 cmd, get_conn_info_complete);
7318 }
7319
7320 if (err < 0) {
7321 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7322 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7323
7324 if (cmd)
7325 mgmt_pending_free(cmd);
7326
7327 goto unlock;
7328 }
7329
7330 conn->conn_info_timestamp = jiffies;
7331 } else {
7332 /* Cache is valid, just reply with values cached in hci_conn */
7333 rp.rssi = conn->rssi;
7334 rp.tx_power = conn->tx_power;
7335 rp.max_tx_power = conn->max_tx_power;
7336
7337 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7338 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7339 }
7340
7341 unlock:
7342 hci_dev_unlock(hdev);
7343 return err;
7344 }
7345
7346 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7347 {
7348 struct mgmt_pending_cmd *cmd = data;
7349 struct mgmt_cp_get_clock_info *cp = cmd->param;
7350 struct mgmt_rp_get_clock_info rp;
7351 struct hci_conn *conn = cmd->user_data;
7352 u8 status = mgmt_status(err);
7353
7354 bt_dev_dbg(hdev, "err %d", err);
7355
7356 memset(&rp, 0, sizeof(rp));
7357 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7358 rp.addr.type = cp->addr.type;
7359
7360 if (err)
7361 goto complete;
7362
7363 rp.local_clock = cpu_to_le32(hdev->clock);
7364
7365 if (conn) {
7366 rp.piconet_clock = cpu_to_le32(conn->clock);
7367 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7368 }
7369
7370 complete:
7371 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7372 sizeof(rp));
7373
7374 mgmt_pending_free(cmd);
7375 }
7376
7377 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7378 {
7379 struct mgmt_pending_cmd *cmd = data;
7380 struct mgmt_cp_get_clock_info *cp = cmd->param;
7381 struct hci_cp_read_clock hci_cp;
7382 struct hci_conn *conn;
7383
7384 memset(&hci_cp, 0, sizeof(hci_cp));
7385 hci_read_clock_sync(hdev, &hci_cp); /* Local clock, which = 0x00 */
7386
7387 /* Make sure connection still exists */
7388 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7389 if (!conn || conn->state != BT_CONNECTED)
7390 return MGMT_STATUS_NOT_CONNECTED;
7391
7392 cmd->user_data = conn;
7393 hci_cp.handle = cpu_to_le16(conn->handle);
7394 hci_cp.which = 0x01; /* Piconet clock */
7395
7396 return hci_read_clock_sync(hdev, &hci_cp);
7397 }
7398
7399 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7400 u16 len)
7401 {
7402 struct mgmt_cp_get_clock_info *cp = data;
7403 struct mgmt_rp_get_clock_info rp;
7404 struct mgmt_pending_cmd *cmd;
7405 struct hci_conn *conn;
7406 int err;
7407
7408 bt_dev_dbg(hdev, "sock %p", sk);
7409
7410 memset(&rp, 0, sizeof(rp));
7411 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7412 rp.addr.type = cp->addr.type;
7413
7414 if (cp->addr.type != BDADDR_BREDR)
7415 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7416 MGMT_STATUS_INVALID_PARAMS,
7417 &rp, sizeof(rp));
7418
7419 hci_dev_lock(hdev);
7420
7421 if (!hdev_is_powered(hdev)) {
7422 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7423 MGMT_STATUS_NOT_POWERED, &rp,
7424 sizeof(rp));
7425 goto unlock;
7426 }
7427
7428 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7429 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7430 &cp->addr.bdaddr);
7431 if (!conn || conn->state != BT_CONNECTED) {
7432 err = mgmt_cmd_complete(sk, hdev->id,
7433 MGMT_OP_GET_CLOCK_INFO,
7434 MGMT_STATUS_NOT_CONNECTED,
7435 &rp, sizeof(rp));
7436 goto unlock;
7437 }
7438 } else {
7439 conn = NULL;
7440 }
7441
7442 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7443 if (!cmd)
7444 err = -ENOMEM;
7445 else
7446 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7447 get_clock_info_complete);
7448
7449 if (err < 0) {
7450 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7451 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7452
7453 if (cmd)
7454 mgmt_pending_free(cmd);
7455 }
7456
7458 unlock:
7459 hci_dev_unlock(hdev);
7460 return err;
7461 }
7462
7463 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7464 {
7465 struct hci_conn *conn;
7466
7467 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7468 if (!conn)
7469 return false;
7470
7471 if (conn->dst_type != type)
7472 return false;
7473
7474 if (conn->state != BT_CONNECTED)
7475 return false;
7476
7477 return true;
7478 }
7479
7480 /* This function requires the caller holds hdev->lock */
7481 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7482 u8 addr_type, u8 auto_connect)
7483 {
7484 struct hci_conn_params *params;
7485
7486 params = hci_conn_params_add(hdev, addr, addr_type);
7487 if (!params)
7488 return -EIO;
7489
7490 if (params->auto_connect == auto_connect)
7491 return 0;
7492
7493 hci_pend_le_list_del_init(params);
7494
7495 switch (auto_connect) {
7496 case HCI_AUTO_CONN_DISABLED:
7497 case HCI_AUTO_CONN_LINK_LOSS:
7498 /* If auto connect is being disabled while we're trying to
7499 * connect to a device, keep connecting.
7500 */
7501 if (params->explicit_connect)
7502 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7503 break;
7504 case HCI_AUTO_CONN_REPORT:
7505 if (params->explicit_connect)
7506 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7507 else
7508 hci_pend_le_list_add(params, &hdev->pend_le_reports);
7509 break;
7510 case HCI_AUTO_CONN_DIRECT:
7511 case HCI_AUTO_CONN_ALWAYS:
7512 if (!is_connected(hdev, addr, addr_type))
7513 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7514 break;
7515 }
7516
7517 params->auto_connect = auto_connect;
7518
7519 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7520 addr, addr_type, auto_connect);
7521
7522 return 0;
7523 }
7524
7525 static void device_added(struct sock *sk, struct hci_dev *hdev,
7526 bdaddr_t *bdaddr, u8 type, u8 action)
7527 {
7528 struct mgmt_ev_device_added ev;
7529
7530 bacpy(&ev.addr.bdaddr, bdaddr);
7531 ev.addr.type = type;
7532 ev.action = action;
7533
7534 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7535 }
7536
7537 static void add_device_complete(struct hci_dev *hdev, void *data, int err)
7538 {
7539 struct mgmt_pending_cmd *cmd = data;
7540 struct mgmt_cp_add_device *cp = cmd->param;
7541
7542 if (!err) {
7543 device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
7544 cp->action);
7545 device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
7546 cp->addr.type, hdev->conn_flags,
7547 PTR_UINT(cmd->user_data));
7548 }
7549
7550 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
7551 mgmt_status(err), &cp->addr, sizeof(cp->addr));
7552 mgmt_pending_free(cmd);
7553 }
7554
7555 static int add_device_sync(struct hci_dev *hdev, void *data)
7556 {
7557 return hci_update_passive_scan_sync(hdev);
7558 }
7559
7560 static int add_device(struct sock *sk, struct hci_dev *hdev,
7561 void *data, u16 len)
7562 {
7563 struct mgmt_pending_cmd *cmd;
7564 struct mgmt_cp_add_device *cp = data;
7565 u8 auto_conn, addr_type;
7566 struct hci_conn_params *params;
7567 int err;
7568 u32 current_flags = 0;
7569 u32 supported_flags;
7570
7571 bt_dev_dbg(hdev, "sock %p", sk);
7572
7573 if (!bdaddr_type_is_valid(cp->addr.type) ||
7574 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7575 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7576 MGMT_STATUS_INVALID_PARAMS,
7577 &cp->addr, sizeof(cp->addr));
7578
7579 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7580 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7581 MGMT_STATUS_INVALID_PARAMS,
7582 &cp->addr, sizeof(cp->addr));
7583
7584 hci_dev_lock(hdev);
7585
7586 if (cp->addr.type == BDADDR_BREDR) {
7587 /* Only the incoming-connection action is supported for now */
7588 if (cp->action != 0x01) {
7589 err = mgmt_cmd_complete(sk, hdev->id,
7590 MGMT_OP_ADD_DEVICE,
7591 MGMT_STATUS_INVALID_PARAMS,
7592 &cp->addr, sizeof(cp->addr));
7593 goto unlock;
7594 }
7595
7596 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7597 &cp->addr.bdaddr,
7598 cp->addr.type, 0);
7599 if (err)
7600 goto unlock;
7601
7602 hci_update_scan(hdev);
7603
7604 goto added;
7605 }
7606
7607 addr_type = le_addr_type(cp->addr.type);
7608
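/* Map the Add Device action onto an auto-connect policy: 0x02 selects
 * HCI_AUTO_CONN_ALWAYS, 0x01 HCI_AUTO_CONN_DIRECT and 0x00 (the only
 * remaining valid value) HCI_AUTO_CONN_REPORT.
 */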
7609 if (cp->action == 0x02)
7610 auto_conn = HCI_AUTO_CONN_ALWAYS;
7611 else if (cp->action == 0x01)
7612 auto_conn = HCI_AUTO_CONN_DIRECT;
7613 else
7614 auto_conn = HCI_AUTO_CONN_REPORT;
7615
7616 /* Kernel internally uses conn_params with resolvable private
7617 * address, but Add Device allows only identity addresses.
7618 * Make sure it is enforced before calling
7619 * hci_conn_params_lookup.
7620 */
7621 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7622 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7623 MGMT_STATUS_INVALID_PARAMS,
7624 &cp->addr, sizeof(cp->addr));
7625 goto unlock;
7626 }
7627
7628 /* If the connection parameters don't exist for this device,
7629 * they will be created and configured with defaults.
7630 */
7631 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7632 auto_conn) < 0) {
7633 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7634 MGMT_STATUS_FAILED, &cp->addr,
7635 sizeof(cp->addr));
7636 goto unlock;
7637 } else {
7638 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7639 addr_type);
7640 if (params)
7641 current_flags = params->flags;
7642 }
7643
7644 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
7645 if (!cmd) {
7646 err = -ENOMEM;
7647 goto unlock;
7648 }
7649
7650 cmd->user_data = UINT_PTR(current_flags);
7651
7652 err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
7653 add_device_complete);
7654 if (err < 0) {
7655 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7656 MGMT_STATUS_FAILED, &cp->addr,
7657 sizeof(cp->addr));
7658 mgmt_pending_free(cmd);
7659 }
7660
7661 goto unlock;
7662
7663 added:
7664 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7665 supported_flags = hdev->conn_flags;
7666 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7667 supported_flags, current_flags);
7668
7669 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7670 MGMT_STATUS_SUCCESS, &cp->addr,
7671 sizeof(cp->addr));
7672
7673 unlock:
7674 hci_dev_unlock(hdev);
7675 return err;
7676 }
7677
7678 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7679 bdaddr_t *bdaddr, u8 type)
7680 {
7681 struct mgmt_ev_device_removed ev;
7682
7683 bacpy(&ev.addr.bdaddr, bdaddr);
7684 ev.addr.type = type;
7685
7686 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7687 }
7688
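/* As with Add Device, the queued work only resyncs the passive scan
 * state with the already-updated lists and connection parameters.
 */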
7689 static int remove_device_sync(struct hci_dev *hdev, void *data)
7690 {
7691 return hci_update_passive_scan_sync(hdev);
7692 }
7693
7694 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7695 void *data, u16 len)
7696 {
7697 struct mgmt_cp_remove_device *cp = data;
7698 int err;
7699
7700 bt_dev_dbg(hdev, "sock %p", sk);
7701
7702 hci_dev_lock(hdev);
7703
7704 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7705 struct hci_conn_params *params;
7706 u8 addr_type;
7707
7708 if (!bdaddr_type_is_valid(cp->addr.type)) {
7709 err = mgmt_cmd_complete(sk, hdev->id,
7710 MGMT_OP_REMOVE_DEVICE,
7711 MGMT_STATUS_INVALID_PARAMS,
7712 &cp->addr, sizeof(cp->addr));
7713 goto unlock;
7714 }
7715
7716 if (cp->addr.type == BDADDR_BREDR) {
7717 err = hci_bdaddr_list_del(&hdev->accept_list,
7718 &cp->addr.bdaddr,
7719 cp->addr.type);
7720 if (err) {
7721 err = mgmt_cmd_complete(sk, hdev->id,
7722 MGMT_OP_REMOVE_DEVICE,
7723 MGMT_STATUS_INVALID_PARAMS,
7724 &cp->addr,
7725 sizeof(cp->addr));
7726 goto unlock;
7727 }
7728
7729 hci_update_scan(hdev);
7730
7731 device_removed(sk, hdev, &cp->addr.bdaddr,
7732 cp->addr.type);
7733 goto complete;
7734 }
7735
7736 addr_type = le_addr_type(cp->addr.type);
7737
7738 /* Kernel internally uses conn_params with resolvable private
7739 * address, but Remove Device allows only identity addresses.
7740 * Make sure it is enforced before calling
7741 * hci_conn_params_lookup.
7742 */
7743 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7744 err = mgmt_cmd_complete(sk, hdev->id,
7745 MGMT_OP_REMOVE_DEVICE,
7746 MGMT_STATUS_INVALID_PARAMS,
7747 &cp->addr, sizeof(cp->addr));
7748 goto unlock;
7749 }
7750
7751 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7752 addr_type);
7753 if (!params) {
7754 err = mgmt_cmd_complete(sk, hdev->id,
7755 MGMT_OP_REMOVE_DEVICE,
7756 MGMT_STATUS_INVALID_PARAMS,
7757 &cp->addr, sizeof(cp->addr));
7758 goto unlock;
7759 }
7760
7761 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7762 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7763 err = mgmt_cmd_complete(sk, hdev->id,
7764 MGMT_OP_REMOVE_DEVICE,
7765 MGMT_STATUS_INVALID_PARAMS,
7766 &cp->addr, sizeof(cp->addr));
7767 goto unlock;
7768 }
7769
7770 hci_conn_params_free(params);
7771
7772 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7773 } else {
7774 struct hci_conn_params *p, *tmp;
7775 struct bdaddr_list *b, *btmp;
7776
7777 if (cp->addr.type) {
7778 err = mgmt_cmd_complete(sk, hdev->id,
7779 MGMT_OP_REMOVE_DEVICE,
7780 MGMT_STATUS_INVALID_PARAMS,
7781 &cp->addr, sizeof(cp->addr));
7782 goto unlock;
7783 }
7784
7785 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7786 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7787 list_del(&b->list);
7788 kfree(b);
7789 }
7790
7791 hci_update_scan(hdev);
7792
7793 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7794 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7795 continue;
7796 device_removed(sk, hdev, &p->addr, p->addr_type);
7797 if (p->explicit_connect) {
7798 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7799 continue;
7800 }
7801 hci_conn_params_free(p);
7802 }
7803
7804 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7805 }
7806
7807 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7808
7809 complete:
7810 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7811 MGMT_STATUS_SUCCESS, &cp->addr,
7812 sizeof(cp->addr));
7813 unlock:
7814 hci_dev_unlock(hdev);
7815 return err;
7816 }
7817
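/* Runs from the cmd_sync queue: applies the loaded parameters to an
 * existing LE connection, or bails out if the link is gone.
 */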
7818 static int conn_update_sync(struct hci_dev *hdev, void *data)
7819 {
7820 struct hci_conn_params *params = data;
7821 struct hci_conn *conn;
7822
7823 conn = hci_conn_hash_lookup_le(hdev, &params->addr, params->addr_type);
7824 if (!conn)
7825 return -ECANCELED;
7826
7827 return hci_le_conn_update_sync(hdev, conn, params);
7828 }
7829
7830 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7831 u16 len)
7832 {
7833 struct mgmt_cp_load_conn_param *cp = data;
7834 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7835 sizeof(struct mgmt_conn_param));
7836 u16 param_count, expected_len;
7837 int i;
7838
7839 if (!lmp_le_capable(hdev))
7840 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7841 MGMT_STATUS_NOT_SUPPORTED);
7842
7843 param_count = __le16_to_cpu(cp->param_count);
7844 if (param_count > max_param_count) {
7845 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7846 param_count);
7847 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7848 MGMT_STATUS_INVALID_PARAMS);
7849 }
7850
7851 expected_len = struct_size(cp, params, param_count);
7852 if (expected_len != len) {
7853 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7854 expected_len, len);
7855 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7856 MGMT_STATUS_INVALID_PARAMS);
7857 }
7858
7859 bt_dev_dbg(hdev, "param_count %u", param_count);
7860
7861 hci_dev_lock(hdev);
7862
7863 if (param_count > 1)
7864 hci_conn_params_clear_disabled(hdev);
7865
7866 for (i = 0; i < param_count; i++) {
7867 struct mgmt_conn_param *param = &cp->params[i];
7868 struct hci_conn_params *hci_param;
7869 u16 min, max, latency, timeout;
7870 bool update = false;
7871 u8 addr_type;
7872
7873 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
7874 param->addr.type);
7875
7876 if (param->addr.type == BDADDR_LE_PUBLIC) {
7877 addr_type = ADDR_LE_DEV_PUBLIC;
7878 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7879 addr_type = ADDR_LE_DEV_RANDOM;
7880 } else {
7881 bt_dev_err(hdev, "ignoring invalid connection parameters");
7882 continue;
7883 }
7884
7885 min = le16_to_cpu(param->min_interval);
7886 max = le16_to_cpu(param->max_interval);
7887 latency = le16_to_cpu(param->latency);
7888 timeout = le16_to_cpu(param->timeout);
7889
7890 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7891 min, max, latency, timeout);
7892
7893 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7894 bt_dev_err(hdev, "ignoring invalid connection parameters");
7895 continue;
7896 }
7897
7898 /* Detect when the load is for an existing parameter, then
7899 * attempt to trigger the connection update procedure.
7900 */
7901 if (!i && param_count == 1) {
7902 hci_param = hci_conn_params_lookup(hdev,
7903 &param->addr.bdaddr,
7904 addr_type);
7905 if (hci_param)
7906 update = true;
7907 else
7908 hci_conn_params_clear_disabled(hdev);
7909 }
7910
7911 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7912 addr_type);
7913 if (!hci_param) {
7914 bt_dev_err(hdev, "failed to add connection parameters");
7915 continue;
7916 }
7917
7918 hci_param->conn_min_interval = min;
7919 hci_param->conn_max_interval = max;
7920 hci_param->conn_latency = latency;
7921 hci_param->supervision_timeout = timeout;
7922
7923 /* Check if we need to trigger a connection update */
7924 if (update) {
7925 struct hci_conn *conn;
7926
7927 /* Look up an existing connection where we are central and,
7928 * if its parameters don't match the new ones, trigger a
7929 * connection update.
7930 */
7931 conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
7932 addr_type);
7933 if (conn && conn->role == HCI_ROLE_MASTER &&
7934 (conn->le_conn_min_interval != min ||
7935 conn->le_conn_max_interval != max ||
7936 conn->le_conn_latency != latency ||
7937 conn->le_supv_timeout != timeout))
7938 hci_cmd_sync_queue(hdev, conn_update_sync,
7939 hci_param, NULL);
7940 }
7941 }
7942
7943 hci_dev_unlock(hdev);
7944
7945 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
7946 NULL, 0);
7947 }
7948
7949 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7950 void *data, u16 len)
7951 {
7952 struct mgmt_cp_set_external_config *cp = data;
7953 bool changed;
7954 int err;
7955
7956 bt_dev_dbg(hdev, "sock %p", sk);
7957
7958 if (hdev_is_powered(hdev))
7959 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7960 MGMT_STATUS_REJECTED);
7961
7962 if (cp->config != 0x00 && cp->config != 0x01)
7963 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7964 MGMT_STATUS_INVALID_PARAMS);
7965
7966 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7967 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7968 MGMT_STATUS_NOT_SUPPORTED);
7969
7970 hci_dev_lock(hdev);
7971
7972 if (cp->config)
7973 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7974 else
7975 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7976
7977 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7978 if (err < 0)
7979 goto unlock;
7980
7981 if (!changed)
7982 goto unlock;
7983
7984 err = new_options(hdev, sk);
7985
7986 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7987 mgmt_index_removed(hdev);
7988
7989 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7990 hci_dev_set_flag(hdev, HCI_CONFIG);
7991 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7992
7993 queue_work(hdev->req_workqueue, &hdev->power_on);
7994 } else {
7995 set_bit(HCI_RAW, &hdev->flags);
7996 mgmt_index_added(hdev);
7997 }
7998 }
7999
8000 unlock:
8001 hci_dev_unlock(hdev);
8002 return err;
8003 }
8004
8005 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
8006 void *data, u16 len)
8007 {
8008 struct mgmt_cp_set_public_address *cp = data;
8009 bool changed;
8010 int err;
8011
8012 bt_dev_dbg(hdev, "sock %p", sk);
8013
8014 if (hdev_is_powered(hdev))
8015 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8016 MGMT_STATUS_REJECTED);
8017
8018 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
8019 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8020 MGMT_STATUS_INVALID_PARAMS);
8021
8022 if (!hdev->set_bdaddr)
8023 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8024 MGMT_STATUS_NOT_SUPPORTED);
8025
8026 hci_dev_lock(hdev);
8027
8028 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
8029 bacpy(&hdev->public_addr, &cp->bdaddr);
8030
8031 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8032 if (err < 0)
8033 goto unlock;
8034
8035 if (!changed)
8036 goto unlock;
8037
8038 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8039 err = new_options(hdev, sk);
8040
8041 if (is_configured(hdev)) {
8042 mgmt_index_removed(hdev);
8043
8044 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8045
8046 hci_dev_set_flag(hdev, HCI_CONFIG);
8047 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8048
8049 queue_work(hdev->req_workqueue, &hdev->power_on);
8050 }
8051
8052 unlock:
8053 hci_dev_unlock(hdev);
8054 return err;
8055 }
8056
8057 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8058 int err)
8059 {
8060 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8061 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8062 u8 *h192, *r192, *h256, *r256;
8063 struct mgmt_pending_cmd *cmd = data;
8064 struct sk_buff *skb = cmd->skb;
8065 u8 status = mgmt_status(err);
8066 u16 eir_len;
8067
8068 if (err == -ECANCELED ||
8069 cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
8070 return;
8071
8072 if (!status) {
8073 if (!skb)
8074 status = MGMT_STATUS_FAILED;
8075 else if (IS_ERR(skb))
8076 status = mgmt_status(PTR_ERR(skb));
8077 else
8078 status = mgmt_status(skb->data[0]);
8079 }
8080
8081 bt_dev_dbg(hdev, "status %u", status);
8082
8083 mgmt_cp = cmd->param;
8084
8085 if (status) {
8086 status = mgmt_status(status);
8087 eir_len = 0;
8088
8089 h192 = NULL;
8090 r192 = NULL;
8091 h256 = NULL;
8092 r256 = NULL;
8093 } else if (!bredr_sc_enabled(hdev)) {
8094 struct hci_rp_read_local_oob_data *rp;
8095
8096 if (skb->len != sizeof(*rp)) {
8097 status = MGMT_STATUS_FAILED;
8098 eir_len = 0;
8099 } else {
8100 status = MGMT_STATUS_SUCCESS;
8101 rp = (void *)skb->data;
8102
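/* 5 bytes for the Class of Device field (len + type +
 * 3 byte CoD) plus 18 bytes (len + type + 16 byte value)
 * for each of the C192 hash and R192 randomizer.
 */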
8103 eir_len = 5 + 18 + 18;
8104 h192 = rp->hash;
8105 r192 = rp->rand;
8106 h256 = NULL;
8107 r256 = NULL;
8108 }
8109 } else {
8110 struct hci_rp_read_local_oob_ext_data *rp;
8111
8112 if (skb->len != sizeof(*rp)) {
8113 status = MGMT_STATUS_FAILED;
8114 eir_len = 0;
8115 } else {
8116 status = MGMT_STATUS_SUCCESS;
8117 rp = (void *)skb->data;
8118
8119 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8120 eir_len = 5 + 18 + 18;
8121 h192 = NULL;
8122 r192 = NULL;
8123 } else {
8124 eir_len = 5 + 18 + 18 + 18 + 18;
8125 h192 = rp->hash192;
8126 r192 = rp->rand192;
8127 }
8128
8129 h256 = rp->hash256;
8130 r256 = rp->rand256;
8131 }
8132 }
8133
8134 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8135 if (!mgmt_rp)
8136 goto done;
8137
8138 if (eir_len == 0)
8139 goto send_rsp;
8140
8141 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8142 hdev->dev_class, 3);
8143
8144 if (h192 && r192) {
8145 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8146 EIR_SSP_HASH_C192, h192, 16);
8147 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8148 EIR_SSP_RAND_R192, r192, 16);
8149 }
8150
8151 if (h256 && r256) {
8152 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8153 EIR_SSP_HASH_C256, h256, 16);
8154 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8155 EIR_SSP_RAND_R256, r256, 16);
8156 }
8157
8158 send_rsp:
8159 mgmt_rp->type = mgmt_cp->type;
8160 mgmt_rp->eir_len = cpu_to_le16(eir_len);
8161
8162 err = mgmt_cmd_complete(cmd->sk, hdev->id,
8163 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8164 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8165 if (err < 0 || status)
8166 goto done;
8167
8168 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8169
8170 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8171 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8172 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8173 done:
8174 if (skb && !IS_ERR(skb))
8175 kfree_skb(skb);
8176
8177 kfree(mgmt_rp);
8178 mgmt_pending_remove(cmd);
8179 }
8180
8181 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8182 struct mgmt_cp_read_local_oob_ext_data *cp)
8183 {
8184 struct mgmt_pending_cmd *cmd;
8185 int err;
8186
8187 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8188 cp, sizeof(*cp));
8189 if (!cmd)
8190 return -ENOMEM;
8191
8192 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8193 read_local_oob_ext_data_complete);
8194
8195 if (err < 0) {
8196 mgmt_pending_remove(cmd);
8197 return err;
8198 }
8199
8200 return 0;
8201 }
8202
8203 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8204 void *data, u16 data_len)
8205 {
8206 struct mgmt_cp_read_local_oob_ext_data *cp = data;
8207 struct mgmt_rp_read_local_oob_ext_data *rp;
8208 size_t rp_len;
8209 u16 eir_len;
8210 u8 status, flags, role, addr[7], hash[16], rand[16];
8211 int err;
8212
8213 bt_dev_dbg(hdev, "sock %p", sk);
8214
8215 if (hdev_is_powered(hdev)) {
8216 switch (cp->type) {
8217 case BIT(BDADDR_BREDR):
8218 status = mgmt_bredr_support(hdev);
8219 if (status)
8220 eir_len = 0;
8221 else
8222 eir_len = 5;
8223 break;
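/* LE reply: bdaddr field (9) + role (3) + SC confirm (18) +
 * SC random (18) + flags (3) bytes of EIR data.
 */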
8224 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8225 status = mgmt_le_support(hdev);
8226 if (status)
8227 eir_len = 0;
8228 else
8229 eir_len = 9 + 3 + 18 + 18 + 3;
8230 break;
8231 default:
8232 status = MGMT_STATUS_INVALID_PARAMS;
8233 eir_len = 0;
8234 break;
8235 }
8236 } else {
8237 status = MGMT_STATUS_NOT_POWERED;
8238 eir_len = 0;
8239 }
8240
8241 rp_len = sizeof(*rp) + eir_len;
8242 rp = kmalloc(rp_len, GFP_ATOMIC);
8243 if (!rp)
8244 return -ENOMEM;
8245
8246 if (!status && !lmp_ssp_capable(hdev)) {
8247 status = MGMT_STATUS_NOT_SUPPORTED;
8248 eir_len = 0;
8249 }
8250
8251 if (status)
8252 goto complete;
8253
8254 hci_dev_lock(hdev);
8255
8256 eir_len = 0;
8257 switch (cp->type) {
8258 case BIT(BDADDR_BREDR):
8259 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8260 err = read_local_ssp_oob_req(hdev, sk, cp);
8261 hci_dev_unlock(hdev);
8262 if (!err)
8263 goto done;
8264
8265 status = MGMT_STATUS_FAILED;
8266 goto complete;
8267 } else {
8268 eir_len = eir_append_data(rp->eir, eir_len,
8269 EIR_CLASS_OF_DEV,
8270 hdev->dev_class, 3);
8271 }
8272 break;
8273 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8274 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8275 smp_generate_oob(hdev, hash, rand) < 0) {
8276 hci_dev_unlock(hdev);
8277 status = MGMT_STATUS_FAILED;
8278 goto complete;
8279 }
8280
8281 /* This should return the active RPA, but since the RPA
8282 * is only programmed on demand, it is really hard to fill
8283 * this in at the moment. For now disallow retrieving
8284 * local out-of-band data when privacy is in use.
8285 *
8286 * Returning the identity address will not help here since
8287 * pairing happens before the identity resolving key is
8288 * known and thus the connection establishment happens
8289 * based on the RPA and not the identity address.
8290 */
8291 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8292 hci_dev_unlock(hdev);
8293 status = MGMT_STATUS_REJECTED;
8294 goto complete;
8295 }
8296
8297 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8298 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8299 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8300 bacmp(&hdev->static_addr, BDADDR_ANY))) {
8301 memcpy(addr, &hdev->static_addr, 6);
8302 addr[6] = 0x01;
8303 } else {
8304 memcpy(addr, &hdev->bdaddr, 6);
8305 addr[6] = 0x00;
8306 }
8307
8308 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8309 addr, sizeof(addr));
8310
8311 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8312 role = 0x02;
8313 else
8314 role = 0x01;
8315
8316 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8317 &role, sizeof(role));
8318
8319 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8320 eir_len = eir_append_data(rp->eir, eir_len,
8321 EIR_LE_SC_CONFIRM,
8322 hash, sizeof(hash));
8323
8324 eir_len = eir_append_data(rp->eir, eir_len,
8325 EIR_LE_SC_RANDOM,
8326 rand, sizeof(rand));
8327 }
8328
8329 flags = mgmt_get_adv_discov_flags(hdev);
8330
8331 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8332 flags |= LE_AD_NO_BREDR;
8333
8334 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8335 &flags, sizeof(flags));
8336 break;
8337 }
8338
8339 hci_dev_unlock(hdev);
8340
8341 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8342
8343 status = MGMT_STATUS_SUCCESS;
8344
8345 complete:
8346 rp->type = cp->type;
8347 rp->eir_len = cpu_to_le16(eir_len);
8348
8349 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8350 status, rp, sizeof(*rp) + eir_len);
8351 if (err < 0 || status)
8352 goto done;
8353
8354 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8355 rp, sizeof(*rp) + eir_len,
8356 HCI_MGMT_OOB_DATA_EVENTS, sk);
8357
8358 done:
8359 kfree(rp);
8360
8361 return err;
8362 }
8363
8364 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8365 {
8366 u32 flags = 0;
8367
8368 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8369 flags |= MGMT_ADV_FLAG_DISCOV;
8370 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8371 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8372 flags |= MGMT_ADV_FLAG_APPEARANCE;
8373 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8374 flags |= MGMT_ADV_PARAM_DURATION;
8375 flags |= MGMT_ADV_PARAM_TIMEOUT;
8376 flags |= MGMT_ADV_PARAM_INTERVALS;
8377 flags |= MGMT_ADV_PARAM_TX_POWER;
8378 flags |= MGMT_ADV_PARAM_SCAN_RSP;
8379
8380 /* With extended advertising the TX_POWER returned from
8381 * Set Adv Param will always be valid.
8382 */
8383 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8384 flags |= MGMT_ADV_FLAG_TX_POWER;
8385
8386 if (ext_adv_capable(hdev)) {
8387 flags |= MGMT_ADV_FLAG_SEC_1M;
8388 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8389 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8390
8391 if (le_2m_capable(hdev))
8392 flags |= MGMT_ADV_FLAG_SEC_2M;
8393
8394 if (le_coded_capable(hdev))
8395 flags |= MGMT_ADV_FLAG_SEC_CODED;
8396 }
8397
8398 return flags;
8399 }
8400
8401 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8402 void *data, u16 data_len)
8403 {
8404 struct mgmt_rp_read_adv_features *rp;
8405 size_t rp_len;
8406 int err;
8407 struct adv_info *adv_instance;
8408 u32 supported_flags;
8409 u8 *instance;
8410
8411 bt_dev_dbg(hdev, "sock %p", sk);
8412
8413 if (!lmp_le_capable(hdev))
8414 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8415 MGMT_STATUS_REJECTED);
8416
8417 hci_dev_lock(hdev);
8418
8419 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8420 rp = kmalloc(rp_len, GFP_ATOMIC);
8421 if (!rp) {
8422 hci_dev_unlock(hdev);
8423 return -ENOMEM;
8424 }
8425
8426 supported_flags = get_supported_adv_flags(hdev);
8427
8428 rp->supported_flags = cpu_to_le32(supported_flags);
8429 rp->max_adv_data_len = max_adv_len(hdev);
8430 rp->max_scan_rsp_len = max_adv_len(hdev);
8431 rp->max_instances = hdev->le_num_of_adv_sets;
8432 rp->num_instances = hdev->adv_instance_cnt;
8433
8434 instance = rp->instance;
8435 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8436 /* Only instances 1-le_num_of_adv_sets are externally visible */
8437 if (adv_instance->instance <= hdev->adv_instance_cnt) {
8438 *instance = adv_instance->instance;
8439 instance++;
8440 } else {
8441 rp->num_instances--;
8442 rp_len--;
8443 }
8444 }
8445
8446 hci_dev_unlock(hdev);
8447
8448 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8449 MGMT_STATUS_SUCCESS, rp, rp_len);
8450
8451 kfree(rp);
8452
8453 return err;
8454 }
8455
8456 static u8 calculate_name_len(struct hci_dev *hdev)
8457 {
8458 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8459
8460 return eir_append_local_name(hdev, buf, 0);
8461 }
8462
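/* Compute how much room is left for caller-supplied TLV data once the
 * kernel-managed fields implied by adv_flags (flags, TX power, local
 * name, appearance) have been reserved.
 */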
8463 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8464 bool is_adv_data)
8465 {
8466 u8 max_len = max_adv_len(hdev);
8467
8468 if (is_adv_data) {
8469 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8470 MGMT_ADV_FLAG_LIMITED_DISCOV |
8471 MGMT_ADV_FLAG_MANAGED_FLAGS))
8472 max_len -= 3;
8473
8474 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8475 max_len -= 3;
8476 } else {
8477 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8478 max_len -= calculate_name_len(hdev);
8479
8480 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8481 max_len -= 4;
8482 }
8483
8484 return max_len;
8485 }
8486
8487 static bool flags_managed(u32 adv_flags)
8488 {
8489 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8490 MGMT_ADV_FLAG_LIMITED_DISCOV |
8491 MGMT_ADV_FLAG_MANAGED_FLAGS);
8492 }
8493
8494 static bool tx_power_managed(u32 adv_flags)
8495 {
8496 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8497 }
8498
8499 static bool name_managed(u32 adv_flags)
8500 {
8501 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8502 }
8503
8504 static bool appearance_managed(u32 adv_flags)
8505 {
8506 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8507 }
8508
8509 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8510 u8 len, bool is_adv_data)
8511 {
8512 int i, cur_len;
8513 u8 max_len;
8514
8515 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8516
8517 if (len > max_len)
8518 return false;
8519
8520 /* Make sure that the data is correctly formatted. */
8521 for (i = 0; i < len; i += (cur_len + 1)) {
8522 cur_len = data[i];
8523
8524 if (!cur_len)
8525 continue;
8526
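/* data[i] holds the field length and data[i + 1] the AD type,
 * following the standard EIR/AD TLV layout.
 */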
8527 if (data[i + 1] == EIR_FLAGS &&
8528 (!is_adv_data || flags_managed(adv_flags)))
8529 return false;
8530
8531 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8532 return false;
8533
8534 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8535 return false;
8536
8537 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8538 return false;
8539
8540 if (data[i + 1] == EIR_APPEARANCE &&
8541 appearance_managed(adv_flags))
8542 return false;
8543
8544 /* If the current field length would exceed the total data
8545 * length, then it's invalid.
8546 */
8547 if (i + cur_len >= len)
8548 return false;
8549 }
8550
8551 return true;
8552 }
8553
8554 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8555 {
8556 u32 supported_flags, phy_flags;
8557
8558 /* The current implementation only supports a subset of the specified
8559 * flags. Also need to check mutual exclusiveness of sec flags.
8560 */
8561 supported_flags = get_supported_adv_flags(hdev);
8562 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
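/* (x & -x) isolates the lowest set bit, so the XOR below is
 * non-zero exactly when more than one secondary PHY flag is set.
 */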
8563 if (adv_flags & ~supported_flags ||
8564 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8565 return false;
8566
8567 return true;
8568 }
8569
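/* Treat advertising as busy while a Set LE command is still pending,
 * since completing it can rewrite the advertising state.
 */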
8570 static bool adv_busy(struct hci_dev *hdev)
8571 {
8572 return pending_find(MGMT_OP_SET_LE, hdev);
8573 }
8574
8575 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8576 int err)
8577 {
8578 struct adv_info *adv, *n;
8579
8580 bt_dev_dbg(hdev, "err %d", err);
8581
8582 hci_dev_lock(hdev);
8583
8584 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8585 u8 instance;
8586
8587 if (!adv->pending)
8588 continue;
8589
8590 if (!err) {
8591 adv->pending = false;
8592 continue;
8593 }
8594
8595 instance = adv->instance;
8596
8597 if (hdev->cur_adv_instance == instance)
8598 cancel_adv_timeout(hdev);
8599
8600 hci_remove_adv_instance(hdev, instance);
8601 mgmt_advertising_removed(sk, hdev, instance);
8602 }
8603
8604 hci_dev_unlock(hdev);
8605 }
8606
8607 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8608 {
8609 struct mgmt_pending_cmd *cmd = data;
8610 struct mgmt_cp_add_advertising *cp = cmd->param;
8611 struct mgmt_rp_add_advertising rp;
8612
8613 memset(&rp, 0, sizeof(rp));
8614
8615 rp.instance = cp->instance;
8616
8617 if (err)
8618 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8619 mgmt_status(err));
8620 else
8621 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8622 mgmt_status(err), &rp, sizeof(rp));
8623
8624 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8625
8626 mgmt_pending_free(cmd);
8627 }
8628
8629 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8630 {
8631 struct mgmt_pending_cmd *cmd = data;
8632 struct mgmt_cp_add_advertising *cp = cmd->param;
8633
8634 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8635 }
8636
8637 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8638 void *data, u16 data_len)
8639 {
8640 struct mgmt_cp_add_advertising *cp = data;
8641 struct mgmt_rp_add_advertising rp;
8642 u32 flags;
8643 u8 status;
8644 u16 timeout, duration;
8645 unsigned int prev_instance_cnt;
8646 u8 schedule_instance = 0;
8647 struct adv_info *adv, *next_instance;
8648 int err;
8649 struct mgmt_pending_cmd *cmd;
8650
8651 bt_dev_dbg(hdev, "sock %p", sk);
8652
8653 status = mgmt_le_support(hdev);
8654 if (status)
8655 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8656 status);
8657
8658 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8659 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8660 MGMT_STATUS_INVALID_PARAMS);
8661
8662 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8663 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8664 MGMT_STATUS_INVALID_PARAMS);
8665
8666 flags = __le32_to_cpu(cp->flags);
8667 timeout = __le16_to_cpu(cp->timeout);
8668 duration = __le16_to_cpu(cp->duration);
8669
8670 if (!requested_adv_flags_are_valid(hdev, flags))
8671 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8672 MGMT_STATUS_INVALID_PARAMS);
8673
8674 hci_dev_lock(hdev);
8675
8676 if (timeout && !hdev_is_powered(hdev)) {
8677 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8678 MGMT_STATUS_REJECTED);
8679 goto unlock;
8680 }
8681
8682 if (adv_busy(hdev)) {
8683 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8684 MGMT_STATUS_BUSY);
8685 goto unlock;
8686 }
8687
8688 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8689 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8690 cp->scan_rsp_len, false)) {
8691 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8692 MGMT_STATUS_INVALID_PARAMS);
8693 goto unlock;
8694 }
8695
8696 prev_instance_cnt = hdev->adv_instance_cnt;
8697
8698 adv = hci_add_adv_instance(hdev, cp->instance, flags,
8699 cp->adv_data_len, cp->data,
8700 cp->scan_rsp_len,
8701 cp->data + cp->adv_data_len,
8702 timeout, duration,
8703 HCI_ADV_TX_POWER_NO_PREFERENCE,
8704 hdev->le_adv_min_interval,
8705 hdev->le_adv_max_interval, 0);
8706 if (IS_ERR(adv)) {
8707 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8708 MGMT_STATUS_FAILED);
8709 goto unlock;
8710 }
8711
8712 /* Only trigger an advertising added event if a new instance was
8713 * actually added.
8714 */
8715 if (hdev->adv_instance_cnt > prev_instance_cnt)
8716 mgmt_advertising_added(sk, hdev, cp->instance);
8717
8718 if (hdev->cur_adv_instance == cp->instance) {
8719 /* If the currently advertised instance is being changed then
8720 * cancel the current advertising and schedule the next
8721 * instance. If there is only one instance then the overridden
8722 * advertising data will be visible right away.
8723 */
8724 cancel_adv_timeout(hdev);
8725
8726 next_instance = hci_get_next_instance(hdev, cp->instance);
8727 if (next_instance)
8728 schedule_instance = next_instance->instance;
8729 } else if (!hdev->adv_instance_timeout) {
8730 /* Immediately advertise the new instance if no other
8731 * instance is currently being advertised.
8732 */
8733 schedule_instance = cp->instance;
8734 }
8735
8736 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8737 * there is no instance to be advertised then we have no HCI
8738 * communication to make. Simply return.
8739 */
8740 if (!hdev_is_powered(hdev) ||
8741 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8742 !schedule_instance) {
8743 rp.instance = cp->instance;
8744 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8745 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8746 goto unlock;
8747 }
8748
8749 /* We're good to go, update advertising data, parameters, and start
8750 * advertising.
8751 */
8752 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8753 data_len);
8754 if (!cmd) {
8755 err = -ENOMEM;
8756 goto unlock;
8757 }
8758
8759 cp->instance = schedule_instance;
8760
8761 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8762 add_advertising_complete);
8763 if (err < 0)
8764 mgmt_pending_free(cmd);
8765
8766 unlock:
8767 hci_dev_unlock(hdev);
8768
8769 return err;
8770 }
8771
8772 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8773 int err)
8774 {
8775 struct mgmt_pending_cmd *cmd = data;
8776 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8777 struct mgmt_rp_add_ext_adv_params rp;
8778 struct adv_info *adv;
8779 u32 flags;
8780
8781 BT_DBG("%s", hdev->name);
8782
8783 hci_dev_lock(hdev);
8784
8785 adv = hci_find_adv_instance(hdev, cp->instance);
8786 if (!adv)
8787 goto unlock;
8788
8789 rp.instance = cp->instance;
8790 rp.tx_power = adv->tx_power;
8791
8792 /* While we're at it, inform userspace of the available space for this
8793 * advertisement, given the flags that will be used.
8794 */
8795 flags = __le32_to_cpu(cp->flags);
8796 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8797 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8798
8799 if (err) {
8800 /* If this instance was previously advertising and we
8801 * failed to update it, signal that it has been removed and
8802 * delete its structure.
8803 */
8804 if (!adv->pending)
8805 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8806
8807 hci_remove_adv_instance(hdev, cp->instance);
8808
8809 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8810 mgmt_status(err));
8811 } else {
8812 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8813 mgmt_status(err), &rp, sizeof(rp));
8814 }
8815
8816 unlock:
8817 mgmt_pending_free(cmd);
8818
8819 hci_dev_unlock(hdev);
8820 }
8821
8822 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8823 {
8824 struct mgmt_pending_cmd *cmd = data;
8825 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8826
8827 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8828 }
8829
8830 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8831 void *data, u16 data_len)
8832 {
8833 struct mgmt_cp_add_ext_adv_params *cp = data;
8834 struct mgmt_rp_add_ext_adv_params rp;
8835 struct mgmt_pending_cmd *cmd = NULL;
8836 struct adv_info *adv;
8837 u32 flags, min_interval, max_interval;
8838 u16 timeout, duration;
8839 u8 status;
8840 s8 tx_power;
8841 int err;
8842
8843 BT_DBG("%s", hdev->name);
8844
8845 status = mgmt_le_support(hdev);
8846 if (status)
8847 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8848 status);
8849
8850 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8851 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8852 MGMT_STATUS_INVALID_PARAMS);
8853
8854 /* The purpose of breaking add_advertising into two separate MGMT calls
8855 * for params and data is to allow more parameters to be added to this
8856 * structure in the future. For this reason, we verify that we have the
8857 * bare minimum structure we know of when the interface was defined. Any
8858 * extra parameters we don't know about will be ignored in this request.
8859 */
8860 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8861 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8862 MGMT_STATUS_INVALID_PARAMS);
8863
8864 flags = __le32_to_cpu(cp->flags);
8865
8866 if (!requested_adv_flags_are_valid(hdev, flags))
8867 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8868 MGMT_STATUS_INVALID_PARAMS);
8869
8870 hci_dev_lock(hdev);
8871
8872 /* In the new interface, we require that we are powered to register */
8873 if (!hdev_is_powered(hdev)) {
8874 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8875 MGMT_STATUS_REJECTED);
8876 goto unlock;
8877 }
8878
8879 if (adv_busy(hdev)) {
8880 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8881 MGMT_STATUS_BUSY);
8882 goto unlock;
8883 }
8884
8885 /* Parse defined parameters from request, use defaults otherwise */
8886 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8887 __le16_to_cpu(cp->timeout) : 0;
8888
8889 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8890 __le16_to_cpu(cp->duration) :
8891 hdev->def_multi_adv_rotation_duration;
8892
8893 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8894 __le32_to_cpu(cp->min_interval) :
8895 hdev->le_adv_min_interval;
8896
8897 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8898 __le32_to_cpu(cp->max_interval) :
8899 hdev->le_adv_max_interval;
8900
8901 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8902 cp->tx_power :
8903 HCI_ADV_TX_POWER_NO_PREFERENCE;
8904
8905 /* Create advertising instance with no advertising or response data */
8906 adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
8907 timeout, duration, tx_power, min_interval,
8908 max_interval, 0);
8909
8910 if (IS_ERR(adv)) {
8911 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8912 MGMT_STATUS_FAILED);
8913 goto unlock;
8914 }
8915
8916 /* Submit request for advertising params if ext adv available */
8917 if (ext_adv_capable(hdev)) {
8918 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8919 data, data_len);
8920 if (!cmd) {
8921 err = -ENOMEM;
8922 hci_remove_adv_instance(hdev, cp->instance);
8923 goto unlock;
8924 }
8925
8926 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8927 add_ext_adv_params_complete);
8928 if (err < 0)
8929 mgmt_pending_free(cmd);
8930 } else {
8931 rp.instance = cp->instance;
8932 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8933 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8934 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8935 err = mgmt_cmd_complete(sk, hdev->id,
8936 MGMT_OP_ADD_EXT_ADV_PARAMS,
8937 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8938 }
8939
8940 unlock:
8941 hci_dev_unlock(hdev);
8942
8943 return err;
8944 }
8945
8946 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8947 {
8948 struct mgmt_pending_cmd *cmd = data;
8949 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8950 struct mgmt_rp_add_advertising rp;
8951
8952 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8953
8954 memset(&rp, 0, sizeof(rp));
8955
8956 rp.instance = cp->instance;
8957
8958 if (err)
8959 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8960 mgmt_status(err));
8961 else
8962 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8963 mgmt_status(err), &rp, sizeof(rp));
8964
8965 mgmt_pending_free(cmd);
8966 }
8967
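/* With extended advertising the controller stores data per instance, so
 * push the advertising data and scan response separately and then
 * enable the set; otherwise fall back to software instance scheduling.
 */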
8968 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8969 {
8970 struct mgmt_pending_cmd *cmd = data;
8971 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8972 int err;
8973
8974 if (ext_adv_capable(hdev)) {
8975 err = hci_update_adv_data_sync(hdev, cp->instance);
8976 if (err)
8977 return err;
8978
8979 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8980 if (err)
8981 return err;
8982
8983 return hci_enable_ext_advertising_sync(hdev, cp->instance);
8984 }
8985
8986 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8987 }
8988
8989 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
8990 u16 data_len)
8991 {
8992 struct mgmt_cp_add_ext_adv_data *cp = data;
8993 struct mgmt_rp_add_ext_adv_data rp;
8994 u8 schedule_instance = 0;
8995 struct adv_info *next_instance;
8996 struct adv_info *adv_instance;
8997 int err = 0;
8998 struct mgmt_pending_cmd *cmd;
8999
9000 BT_DBG("%s", hdev->name);
9001
9002 hci_dev_lock(hdev);
9003
9004 adv_instance = hci_find_adv_instance(hdev, cp->instance);
9005
9006 if (!adv_instance) {
9007 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9008 MGMT_STATUS_INVALID_PARAMS);
9009 goto unlock;
9010 }
9011
9012 /* In the new interface, we require that we are powered to register */
9013 if (!hdev_is_powered(hdev)) {
9014 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9015 MGMT_STATUS_REJECTED);
9016 goto clear_new_instance;
9017 }
9018
9019 if (adv_busy(hdev)) {
9020 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9021 MGMT_STATUS_BUSY);
9022 goto clear_new_instance;
9023 }
9024
9025 /* Validate new data */
9026 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
9027 cp->adv_data_len, true) ||
9028 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
9029 cp->adv_data_len, cp->scan_rsp_len, false)) {
9030 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9031 MGMT_STATUS_INVALID_PARAMS);
9032 goto clear_new_instance;
9033 }
9034
9035 /* Set the data in the advertising instance */
9036 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
9037 cp->data, cp->scan_rsp_len,
9038 cp->data + cp->adv_data_len);
9039
9040 /* If using software rotation, determine next instance to use */
9041 if (hdev->cur_adv_instance == cp->instance) {
9042 /* If the currently advertised instance is being changed
9043 * then cancel the current advertising and schedule the
9044 * next instance. If there is only one instance then the
9045 * overridden advertising data will be visible right
9046 * away
9047 */
9048 cancel_adv_timeout(hdev);
9049
9050 next_instance = hci_get_next_instance(hdev, cp->instance);
9051 if (next_instance)
9052 schedule_instance = next_instance->instance;
9053 } else if (!hdev->adv_instance_timeout) {
9054 /* Immediately advertise the new instance if no other
9055 * instance is currently being advertised.
9056 */
9057 schedule_instance = cp->instance;
9058 }
9059
9060 /* If the HCI_ADVERTISING flag is set or there is no instance to
9061 * be advertised then we have no HCI communication to make.
9062 * Simply return.
9063 */
9064 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
9065 if (adv_instance->pending) {
9066 mgmt_advertising_added(sk, hdev, cp->instance);
9067 adv_instance->pending = false;
9068 }
9069 rp.instance = cp->instance;
9070 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9071 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9072 goto unlock;
9073 }
9074
9075 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9076 data_len);
9077 if (!cmd) {
9078 err = -ENOMEM;
9079 goto clear_new_instance;
9080 }
9081
9082 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
9083 add_ext_adv_data_complete);
9084 if (err < 0) {
9085 mgmt_pending_free(cmd);
9086 goto clear_new_instance;
9087 }
9088
9089 /* We were successful in updating data, so trigger an
9090 * advertising_added event if this is an instance that wasn't
9091 * previously advertising. If a failure occurs in the requests
9092 * we initiated, the instance is removed again via add_adv_complete
9093 */
9094 if (adv_instance->pending)
9095 mgmt_advertising_added(sk, hdev, cp->instance);
9096
9097 goto unlock;
9098
9099 clear_new_instance:
9100 hci_remove_adv_instance(hdev, cp->instance);
9101
9102 unlock:
9103 hci_dev_unlock(hdev);
9104
9105 return err;
9106 }
9107
9108 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9109 int err)
9110 {
9111 struct mgmt_pending_cmd *cmd = data;
9112 struct mgmt_cp_remove_advertising *cp = cmd->param;
9113 struct mgmt_rp_remove_advertising rp;
9114
9115 bt_dev_dbg(hdev, "err %d", err);
9116
9117 memset(&rp, 0, sizeof(rp));
9118 rp.instance = cp->instance;
9119
9120 if (err)
9121 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9122 mgmt_status(err));
9123 else
9124 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9125 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9126
9127 mgmt_pending_free(cmd);
9128 }
9129
9130 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9131 {
9132 struct mgmt_pending_cmd *cmd = data;
9133 struct mgmt_cp_remove_advertising *cp = cmd->param;
9134 int err;
9135
9136 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9137 if (err)
9138 return err;
9139
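/* If that was the last instance, advertising as a whole can be
 * disabled.
 */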
9140 if (list_empty(&hdev->adv_instances))
9141 err = hci_disable_advertising_sync(hdev);
9142
9143 return err;
9144 }
9145
9146 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9147 void *data, u16 data_len)
9148 {
9149 struct mgmt_cp_remove_advertising *cp = data;
9150 struct mgmt_pending_cmd *cmd;
9151 int err;
9152
9153 bt_dev_dbg(hdev, "sock %p", sk);
9154
9155 hci_dev_lock(hdev);
9156
9157 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9158 err = mgmt_cmd_status(sk, hdev->id,
9159 MGMT_OP_REMOVE_ADVERTISING,
9160 MGMT_STATUS_INVALID_PARAMS);
9161 goto unlock;
9162 }
9163
9164 if (pending_find(MGMT_OP_SET_LE, hdev)) {
9165 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9166 MGMT_STATUS_BUSY);
9167 goto unlock;
9168 }
9169
9170 if (list_empty(&hdev->adv_instances)) {
9171 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9172 MGMT_STATUS_INVALID_PARAMS);
9173 goto unlock;
9174 }
9175
9176 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9177 data_len);
9178 if (!cmd) {
9179 err = -ENOMEM;
9180 goto unlock;
9181 }
9182
9183 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
9184 remove_advertising_complete);
9185 if (err < 0)
9186 mgmt_pending_free(cmd);
9187
9188 unlock:
9189 hci_dev_unlock(hdev);
9190
9191 return err;
9192 }
9193
9194 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9195 void *data, u16 data_len)
9196 {
9197 struct mgmt_cp_get_adv_size_info *cp = data;
9198 struct mgmt_rp_get_adv_size_info rp;
9199 u32 flags, supported_flags;
9200
9201 bt_dev_dbg(hdev, "sock %p", sk);
9202
9203 if (!lmp_le_capable(hdev))
9204 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9205 MGMT_STATUS_REJECTED);
9206
9207 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9208 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9209 MGMT_STATUS_INVALID_PARAMS);
9210
9211 flags = __le32_to_cpu(cp->flags);
9212
9213 /* The current implementation only supports a subset of the specified
9214 * flags.
9215 */
9216 supported_flags = get_supported_adv_flags(hdev);
9217 if (flags & ~supported_flags)
9218 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9219 MGMT_STATUS_INVALID_PARAMS);
9220
9221 rp.instance = cp->instance;
9222 rp.flags = cp->flags;
9223 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9224 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9225
9226 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9227 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9228 }
9229
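/* Handler table indexed by MGMT opcode. The second field is the
 * expected parameter size, enforced as an exact size or, with
 * HCI_MGMT_VAR_LEN, as a minimum before the handler is invoked.
 */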
9230 static const struct hci_mgmt_handler mgmt_handlers[] = {
9231 { NULL }, /* 0x0000 (no command) */
9232 { read_version, MGMT_READ_VERSION_SIZE,
9233 HCI_MGMT_NO_HDEV |
9234 HCI_MGMT_UNTRUSTED },
9235 { read_commands, MGMT_READ_COMMANDS_SIZE,
9236 HCI_MGMT_NO_HDEV |
9237 HCI_MGMT_UNTRUSTED },
9238 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
9239 HCI_MGMT_NO_HDEV |
9240 HCI_MGMT_UNTRUSTED },
9241 { read_controller_info, MGMT_READ_INFO_SIZE,
9242 HCI_MGMT_UNTRUSTED },
9243 { set_powered, MGMT_SETTING_SIZE },
9244 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
9245 { set_connectable, MGMT_SETTING_SIZE },
9246 { set_fast_connectable, MGMT_SETTING_SIZE },
9247 { set_bondable, MGMT_SETTING_SIZE },
9248 { set_link_security, MGMT_SETTING_SIZE },
9249 { set_ssp, MGMT_SETTING_SIZE },
9250 { set_hs, MGMT_SETTING_SIZE },
9251 { set_le, MGMT_SETTING_SIZE },
9252 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
9253 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
9254 { add_uuid, MGMT_ADD_UUID_SIZE },
9255 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
9256 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
9257 HCI_MGMT_VAR_LEN },
9258 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
9259 HCI_MGMT_VAR_LEN },
9260 { disconnect, MGMT_DISCONNECT_SIZE },
9261 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
9262 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
9263 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
9264 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
9265 { pair_device, MGMT_PAIR_DEVICE_SIZE },
9266 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
9267 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
9268 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
9269 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
9270 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
9271 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
9272 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
9273 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
9274 HCI_MGMT_VAR_LEN },
9275 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
9276 { start_discovery, MGMT_START_DISCOVERY_SIZE },
9277 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
9278 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
9279 { block_device, MGMT_BLOCK_DEVICE_SIZE },
9280 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
9281 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
9282 { set_advertising, MGMT_SETTING_SIZE },
9283 { set_bredr, MGMT_SETTING_SIZE },
9284 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
9285 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
9286 { set_secure_conn, MGMT_SETTING_SIZE },
9287 { set_debug_keys, MGMT_SETTING_SIZE },
9288 { set_privacy, MGMT_SET_PRIVACY_SIZE },
9289 { load_irks, MGMT_LOAD_IRKS_SIZE,
9290 HCI_MGMT_VAR_LEN },
9291 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
9292 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
9293 { add_device, MGMT_ADD_DEVICE_SIZE },
9294 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
9295 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
9296 HCI_MGMT_VAR_LEN },
9297 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
9298 HCI_MGMT_NO_HDEV |
9299 HCI_MGMT_UNTRUSTED },
9300 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
9301 HCI_MGMT_UNCONFIGURED |
9302 HCI_MGMT_UNTRUSTED },
9303 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
9304 HCI_MGMT_UNCONFIGURED },
9305 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
9306 HCI_MGMT_UNCONFIGURED },
9307 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
9308 HCI_MGMT_VAR_LEN },
9309 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
9310 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
9311 HCI_MGMT_NO_HDEV |
9312 HCI_MGMT_UNTRUSTED },
9313 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
9314 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
9315 HCI_MGMT_VAR_LEN },
9316 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
9317 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
9318 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
9319 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
9320 HCI_MGMT_UNTRUSTED },
9321 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
9322 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
9323 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
9324 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
9325 HCI_MGMT_VAR_LEN },
9326 { set_wideband_speech, MGMT_SETTING_SIZE },
9327 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
9328 HCI_MGMT_UNTRUSTED },
9329 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
9330 HCI_MGMT_UNTRUSTED |
9331 HCI_MGMT_HDEV_OPTIONAL },
9332 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
9333 HCI_MGMT_VAR_LEN |
9334 HCI_MGMT_HDEV_OPTIONAL },
9335 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
9336 HCI_MGMT_UNTRUSTED },
9337 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
9338 HCI_MGMT_VAR_LEN },
9339 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
9340 HCI_MGMT_UNTRUSTED },
9341 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
9342 HCI_MGMT_VAR_LEN },
9343 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
9344 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
9345 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
9346 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
9347 HCI_MGMT_VAR_LEN },
9348 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
9349 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
9350 HCI_MGMT_VAR_LEN },
9351 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
9352 HCI_MGMT_VAR_LEN },
9353 { add_adv_patterns_monitor_rssi,
9354 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
9355 HCI_MGMT_VAR_LEN },
9356 { set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
9357 HCI_MGMT_VAR_LEN },
9358 { mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
9359 { mesh_send, MGMT_MESH_SEND_SIZE,
9360 HCI_MGMT_VAR_LEN },
9361 { mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
9362 { mgmt_hci_cmd_sync, MGMT_HCI_CMD_SYNC_SIZE, HCI_MGMT_VAR_LEN },
9363 };
9364
9365 void mgmt_index_added(struct hci_dev *hdev)
9366 {
9367 struct mgmt_ev_ext_index ev;
9368
9369 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9370 return;
9371
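/* The extended index event carries a type field: 0x01 for an
 * unconfigured controller and 0x00 for a configured one.
 */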
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}

void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
}

void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}

void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let us use
	 * the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
	else
		match.mgmt_status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct mgmt_pending_cmd *cmd;
	u8 status;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_RESPONDER:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}

	return MGMT_LTK_UNAUTHENTICATED;
}

void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't need their
	 * long term keys stored. Their addresses will change the next
	 * time around.
	 *
	 * Only when a remote device provides an identity address do we
	 * make sure the long term key is stored. If the remote identity
	 * is known, the long term keys are internally mapped to the
	 * identity address. So allow static random and public addresses
	 * here.
	 */
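	/* The random address sub-type lives in the two most significant
	 * bits of the most significant octet (b[5], addresses being
	 * little-endian): 0b11 is static random, while 0b01 (resolvable)
	 * and 0b00 (non-resolvable) are private addresses. Hence the
	 * 0xc0 test below lets only static random addresses through.
	 */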
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't need their
	 * signature resolving keys stored. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address do we
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
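	/* Same static-vs-private random address test as in mgmt_new_ltk()
	 * above: only the 0b11 (static random) sub-type survives.
	 */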
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* Allocate a buffer for the LE advertising data or BR/EDR EIR */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) +
				     (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE,
						    name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class,
						    sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}

static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}

bool mgmt_powering_down(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
		return true;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
					 conn->dst_type, status, true);
		return;
	}

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS,
				   NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this name change is part of powering the HCI dev
		 * on or off, don't send any mgmt signals.
		 */
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
			return;

		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}

static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}

static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

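		/* 16- and 32-bit UUIDs get expanded onto the Bluetooth
		 * Base UUID (00000000-0000-1000-8000-00805F9B34FB). The
		 * uuid[] scratch buffer holds the value little-endian, so
		 * octets 12-13 (or 12-15) carry the short UUID; e.g. the
		 * 16-bit UUID 0x180d (Heart Rate) expands to
		 * 0000180d-0000-1000-8000-00805f9b34fb.
		 */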
		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}

static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If an RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * an RSSI smaller than the RSSI threshold will be dropped. If the
	 * quirk is set, let it through for further processing, as we might
	 * need to restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	     (rssi < hdev->discovery.rssi &&
	      !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}

void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.monitor_handle = cpu_to_le16(handle);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}

static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}

static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitors
	 *
	 * For cases 1 and 2, report all advertisements via
	 * MGMT_EV_DEVICE_FOUND and report ONLY one advertisement per device
	 * for the matched Monitor via MGMT_EV_ADV_MONITOR_DEVICE_FOUND.
	 *
	 * For case 3, since we are not actively scanning and all
	 * advertisements received are due to a matched Advertisement
	 * Monitor, report all advertisements ONLY via
	 * MGMT_EV_ADV_MONITOR_DEVICE_FOUND.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}

static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
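	/* Advertising payloads are a sequence of AD structures, each a
	 * length octet followed by an AD type octet and the data; eir[i]
	 * holds the length and eir[i + 1] the type, hence the
	 * i += eir[i] + 1 stride in the loops below.
	 */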
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}

void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for non-kernel-initiated discovery. With LE,
	 * one exception is if we have pend_le_reports > 0, in which case
	 * we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);

			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes cover a potential Class of
	 * Device field: one length octet, one AD type octet and three
	 * CoD octets, appended below when dev_class is set.
	 */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}

void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name,
					    name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}

void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	} else {
		memset(&ev.addr, 0, sizeof(ev.addr));
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}

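/* All Management traffic flows over the HCI control channel; registering
 * the hci_mgmt_chan below is what connects the mgmt_handlers table to the
 * HCI socket layer, which dispatches incoming commands by opcode and
 * enforces each entry's size and flags.
 */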
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};

int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}

void mgmt_cleanup(struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

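	/* A closing control socket may still own queued mesh packets;
	 * drain and complete every one of them on each controller so no
	 * transmission is left dangling after the socket goes away.
	 */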
	list_for_each_entry(hdev, &hci_dev_list, list) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, true);
		} while (mesh_tx);
	}

	read_unlock(&hci_dev_list_lock);
}