/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010 Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <linux/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
#include "aosp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	23

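/* Opcodes accepted over the HCI control channel. Trusted sockets may use
 * the full set below, while untrusted sockets are limited to the read-only
 * subset in mgmt_untrusted_commands further down (see read_commands()).
 */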
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
	MGMT_OP_HCI_CMD_SYNC,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};

static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};

#define CACHE_TIMEOUT	secs_to_jiffies(2)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

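/* Map negative errno values, as produced by the hci_sync machinery, to
 * MGMT status codes. Anything unrecognized falls back to
 * MGMT_STATUS_FAILED.
 */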
static u8 mgmt_errno_status(int err)
{
	switch (err) {
	case 0:
		return MGMT_STATUS_SUCCESS;
	case -EPERM:
		return MGMT_STATUS_REJECTED;
	case -EINVAL:
		return MGMT_STATUS_INVALID_PARAMS;
	case -EOPNOTSUPP:
		return MGMT_STATUS_NOT_SUPPORTED;
	case -EBUSY:
		return MGMT_STATUS_BUSY;
	case -ETIMEDOUT:
		return MGMT_STATUS_AUTH_FAILED;
	case -ENOMEM:
		return MGMT_STATUS_NO_RESOURCES;
	case -EISCONN:
		return MGMT_STATUS_ALREADY_CONNECTED;
	case -ENOTCONN:
		return MGMT_STATUS_DISCONNECTED;
	}

	return MGMT_STATUS_FAILED;
}

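/* Convert an HCI status code or a negative errno into a MGMT status.
 * For example, mgmt_status(-EBUSY) resolves through mgmt_errno_status()
 * to MGMT_STATUS_BUSY, and mgmt_status(0x0c) ("Command Disallowed")
 * resolves through mgmt_status_table above to the same MGMT_STATUS_BUSY.
 */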
static u8 mgmt_status(int err)
{
	if (err < 0)
		return mgmt_errno_status(err);

	if (err < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[err];

	return MGMT_STATUS_FAILED;
}

static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}

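/* Translate the MGMT address type used on the control channel into the
 * HCI LE address type: BDADDR_LE_PUBLIC maps to ADDR_LE_DEV_PUBLIC and
 * everything else is treated as a random address.
 */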
static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;
	else
		return ADDR_LE_DEV_RANDOM;
}

void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}

static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}

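/* The index list handlers below all apply the same filtering: controllers
 * still in HCI_SETUP or HCI_CONFIG, controllers bound to a user channel
 * and raw-only (HCI_QUIRK_RAW_DEVICE) controllers are never reported. The
 * remaining ones are then split by the HCI_UNCONFIGURED flag.
 */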
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}

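/* A controller counts as configured once any externally required setup
 * has completed and, where the quirks demand it, a valid public address
 * has been set. get_missing_options() reports the same two conditions as
 * still-missing option bits.
 */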
static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}

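/* Build the bitmask of PHYs the controller supports: BR/EDR 1M 1-slot is
 * implied by BR/EDR support, the 3/5-slot and EDR 2M/3M variants depend
 * on the LMP feature bits, and the LE 2M and Coded PHYs depend on the LE
 * feature bits.
 */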
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}

static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}

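/* BR/EDR 1M 1-slot and the LE 1M PHY are mandatory, so they are masked
 * out of the configurable set.
 */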
static u32 get_configurable_phys(struct hci_dev *hdev)
{
	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}

static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev))
			settings |= MGMT_SETTING_SSP;

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (ll_privacy_capable(hdev))
		settings |= MGMT_SETTING_LL_PRIVACY;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The static address setting serves two purposes: it indicates
	 * whether a static address will be used, and whether one is
	 * actually set.
	 *
	 * If no static address is configured, this flag is never set.
	 * If one is configured, whether the address is actually in use
	 * decides the flag.
	 *
	 * For LE-only controllers, for dual-mode controllers with
	 * BR/EDR disabled and for controllers without a public address,
	 * the mere existence of the static address is evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	if (ll_privacy_capable(hdev))
		settings |= MGMT_SETTING_LL_PRIVACY;

	return settings;
}

static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}

bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}

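/* Flush the cached EIR and class-of-device state to the controller. This
 * is queued from service_cache_off() below once the HCI_SERVICE_CACHE
 * flag has been cleared.
 */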
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}

static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}

static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the advertising-enable path used below
	 * (hci_start_ext_adv_sync() or hci_enable_advertising_sync()).
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}

static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data);

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);

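/* Mesh TX completion: unless asked to stay silent, emit
 * MGMT_EV_MESH_PACKET_CMPLT carrying the packet handle before dropping
 * the tracking entry.
 */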
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}

static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}

static int mesh_send_sync(struct hci_dev *hdev, void *data);
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}

static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}

static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set implicitly so
	 * that pairing works for them. For mgmt, however, we require
	 * user-space to explicitly enable it.
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}

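/* Assemble the EIR payload shared by the extended controller info reply
 * and the ext-info-changed event: class of device when BR/EDR is enabled,
 * appearance when LE is enabled, plus the complete and short local names.
 */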
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}

static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}

static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}

void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

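/* Disarm a pending advertising-instance timeout, if one is armed, and
 * clear the recorded timeout so the expire work is not processed for it.
 */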
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where the controller might
		 * not "really" have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}

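/* Completion handler for Set Powered: bail out if the command has been
 * cancelled or superseded, otherwise either report the updated settings
 * (restarting LE actions and passive scanning on power on) or translate
 * the error into a command status.
 */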
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}

static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return -ECANCELED;

	cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}

static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!cp->val) {
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
					      MGMT_STATUS_BUSY);
			goto failed;
		}
	}

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}

static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}

static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	/* dequeue cmd_sync entries using cmd as data as that is about to be
	 * removed/freed.
	 */
	hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);

	if (cmd->cmd_complete) {
		cmd->cmd_complete(cmd, match->mgmt_status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}

static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}

static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = secs_to_jiffies(hdev->discov_timeout);

		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}

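/* Set Discoverable accepts 0x00 (off), 0x01 (general) and 0x02 (limited).
 * Disabling requires a zero timeout and limited mode requires a non-zero
 * timeout; the timeout itself is armed in the completion handler.
 */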
set_discoverable(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)1561 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1562 u16 len)
1563 {
1564 struct mgmt_cp_set_discoverable *cp = data;
1565 struct mgmt_pending_cmd *cmd;
1566 u16 timeout;
1567 int err;
1568
1569 bt_dev_dbg(hdev, "sock %p", sk);
1570
1571 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1572 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1573 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1574 MGMT_STATUS_REJECTED);
1575
1576 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1577 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1578 MGMT_STATUS_INVALID_PARAMS);
1579
1580 timeout = __le16_to_cpu(cp->timeout);
1581
1582 /* Disabling discoverable requires that no timeout is set,
1583 * and enabling limited discoverable requires a timeout.
1584 */
1585 if ((cp->val == 0x00 && timeout > 0) ||
1586 (cp->val == 0x02 && timeout == 0))
1587 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1588 MGMT_STATUS_INVALID_PARAMS);
1589
1590 hci_dev_lock(hdev);
1591
1592 if (!hdev_is_powered(hdev) && timeout > 0) {
1593 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1594 MGMT_STATUS_NOT_POWERED);
1595 goto failed;
1596 }
1597
1598 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1599 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1600 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1601 MGMT_STATUS_BUSY);
1602 goto failed;
1603 }
1604
1605 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1606 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1607 MGMT_STATUS_REJECTED);
1608 goto failed;
1609 }
1610
1611 if (hdev->advertising_paused) {
1612 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1613 MGMT_STATUS_BUSY);
1614 goto failed;
1615 }
1616
1617 if (!hdev_is_powered(hdev)) {
1618 bool changed = false;
1619
1620 /* Setting limited discoverable when powered off is
1621 * not a valid operation since it requires a timeout
1622 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1623 */
1624 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1625 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1626 changed = true;
1627 }
1628
1629 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1630 if (err < 0)
1631 goto failed;
1632
1633 if (changed)
1634 err = new_settings(hdev, sk);
1635
1636 goto failed;
1637 }
1638
1639 /* If the current mode is the same, then just update the timeout
1640 * value with the new value. And if only the timeout gets updated,
1641 * then no need for any HCI transactions.
1642 */
1643 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1644 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1645 HCI_LIMITED_DISCOVERABLE)) {
1646 cancel_delayed_work(&hdev->discov_off);
1647 hdev->discov_timeout = timeout;
1648
1649 if (cp->val && hdev->discov_timeout > 0) {
1650 int to = secs_to_jiffies(hdev->discov_timeout);
1651 queue_delayed_work(hdev->req_workqueue,
1652 &hdev->discov_off, to);
1653 }
1654
1655 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1656 goto failed;
1657 }
1658
1659 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1660 if (!cmd) {
1661 err = -ENOMEM;
1662 goto failed;
1663 }
1664
1665 /* Cancel any potential discoverable timeout that might be
1666 * still active and store new timeout value. The arming of
1667 * the timeout happens in the complete handler.
1668 */
1669 cancel_delayed_work(&hdev->discov_off);
1670 hdev->discov_timeout = timeout;
1671
1672 if (cp->val)
1673 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1674 else
1675 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1676
1677 /* Limited discoverable mode */
1678 if (cp->val == 0x02)
1679 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1680 else
1681 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1682
1683 err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1684 mgmt_set_discoverable_complete);
1685
1686 if (err < 0)
1687 mgmt_pending_remove(cmd);
1688
1689 failed:
1690 hci_dev_unlock(hdev);
1691 return err;
1692 }
1693
mgmt_set_connectable_complete(struct hci_dev * hdev,void * data,int err)1694 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1695 int err)
1696 {
1697 struct mgmt_pending_cmd *cmd = data;
1698
1699 bt_dev_dbg(hdev, "err %d", err);
1700
1701 /* Make sure cmd still outstanding. */
1702 if (err == -ECANCELED ||
1703 cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1704 return;
1705
1706 hci_dev_lock(hdev);
1707
1708 if (err) {
1709 u8 mgmt_err = mgmt_status(err);
1710 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1711 goto done;
1712 }
1713
1714 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1715 new_settings(hdev, cmd->sk);
1716
1717 done:
1718 mgmt_pending_remove(cmd);
1719
1720 hci_dev_unlock(hdev);
1721 }
1722
set_connectable_update_settings(struct hci_dev * hdev,struct sock * sk,u8 val)1723 static int set_connectable_update_settings(struct hci_dev *hdev,
1724 struct sock *sk, u8 val)
1725 {
1726 bool changed = false;
1727 int err;
1728
1729 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1730 changed = true;
1731
1732 if (val) {
1733 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1734 } else {
1735 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1736 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1737 }
1738
1739 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1740 if (err < 0)
1741 return err;
1742
1743 if (changed) {
1744 hci_update_scan(hdev);
1745 hci_update_passive_scan(hdev);
1746 return new_settings(hdev, sk);
1747 }
1748
1749 return 0;
1750 }

static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}

static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}

static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}

static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	bt_dev_dbg(hdev, "sock %p", sk);

	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
			       MGMT_STATUS_NOT_SUPPORTED);
}

static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
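
/* Note on the 0x00 instance used above (a sketch, not normative): instance
 * 0 is the default advertising set kept for clients that do not use the
 * extended Add Advertising interface. On an extended-advertising capable
 * controller the enable path is roughly:
 *
 *	hci_setup_ext_adv_instance_sync(hdev, 0x00);   set adv parameters
 *	hci_update_scan_rsp_data_sync(hdev, 0x00);     push name/appearance
 *
 * Legacy controllers skip the instance setup and only refresh the
 * advertising and scan response data.
 */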

static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	u8 status = mgmt_status(err);
	struct sock *sk = cmd->sk;

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	mgmt_pending_remove(cmd);
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
}

static int set_mesh_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_mesh *cp = cmd->param;
	size_t len = cmd->param_len;

	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));

	if (cp->enable)
		hci_dev_set_flag(hdev, HCI_MESH);
	else
		hci_dev_clear_flag(hdev, HCI_MESH);

	len -= sizeof(*cp);

	/* If filters don't fit, forward all adv pkts */
	if (len <= sizeof(hdev->mesh_ad_types))
		memcpy(hdev->mesh_ad_types, cp->ad_types, len);

	hci_update_passive_scan_sync(hdev);
	return 0;
}
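
/* Worked example for the filter copy above (hypothetical values): with
 * param_len == sizeof(*cp) + 3 and cp->ad_types == { 0x29, 0x2a, 0x2b },
 * len becomes 3, which fits into mesh_ad_types, so only those three AD
 * types are forwarded. A caller that sends more filter bytes than
 * sizeof(hdev->mesh_ad_types) leaves the array zeroed from the memset(),
 * i.e. all advertising packets are forwarded.
 */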

static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->enable != 0x00 && cp->enable != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}

static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	unsigned long mesh_send_interval;
	u8 mgmt_err = mgmt_status(err);

	/* Report any errors here, but don't report completion */

	if (mgmt_err) {
		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
		/* Send Complete Error Code for handle */
		mesh_send_complete(hdev, mesh_tx, false);
		return;
	}

	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
			   mesh_send_interval);
}
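
/* The completion timer above is derived from the transmit count: each
 * repetition is budgeted at 25 ms. For example (hypothetical value),
 * send->cnt == 4 yields msecs_to_jiffies(100), so mesh_send_done runs
 * roughly 100 ms after the advertising was started.
 */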

static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	timeout = 1000;
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other, or
		 * let it go naturally from queue if ADV is already happening
		 */
		instance = 0;
	}

	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}
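
/* Sketch of the instance numbering used above: mesh TX deliberately uses
 * an instance one past the controller's advertising sets, e.g. with
 * le_num_of_adv_sets == 3 the mesh packet occupies instance 4. Regular
 * MGMT_OP_ADD_ADVERTISING instances (1..3 in that case) are therefore
 * never clobbered, and the busy check at the top rejects the send when
 * all sets are already in use.
 */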

static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
{
	struct mgmt_rp_mesh_read_features *rp = data;

	if (rp->used_handles >= rp->max_handles)
		return;

	rp->handles[rp->used_handles++] = mesh_tx->handle;
}

static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	memset(&rp, 0, sizeof(rp));
	rp.index = cpu_to_le16(hdev->id);
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}
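
/* The response length computation above trims the unused tail of the
 * fixed-size handle array. For example (hypothetical count): with
 * MESH_HANDLES_MAX == 3 and two active handles, the reply length is
 * 2 + sizeof(rp) - 3, i.e. the header plus exactly two handle bytes,
 * since each handle is a single u8.
 */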

static int send_cancel(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
	struct mgmt_mesh_tx *mesh_tx;

	if (!cancel->handle) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, false);
		} while (mesh_tx);
	} else {
		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);

		if (mesh_tx && mesh_tx->sk == cmd->sk)
			mesh_send_complete(hdev, mesh_tx, false);
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
			  0, NULL, 0);
	mgmt_pending_free(cmd);

	return 0;
}

static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);
	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}

static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct mgmt_cp_mesh_send *send = data;
	struct mgmt_rp_mesh_read_features rp;
	bool sending;
	int err = 0;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_NOT_SUPPORTED);
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    len <= MGMT_MESH_SEND_SIZE ||
	    len > (MGMT_MESH_SEND_SIZE + 31))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.max_handles = MESH_HANDLES_MAX;

	mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	if (rp.max_handles <= rp.used_handles) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_BUSY);
		goto done;
	}

	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);

	if (!mesh_tx)
		err = -ENOMEM;
	else if (!sending)
		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
					 mesh_send_start_complete);

	if (err < 0) {
		bt_dev_err(hdev, "Send Mesh Failed %d", err);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_FAILED);

		if (mesh_tx) {
			if (sending)
				mgmt_mesh_remove(mesh_tx);
		}
	} else {
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);

		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
				  &mesh_tx->handle, 1);
	}

done:
	hci_dev_unlock(hdev);
	return err;
}

static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Single-mode LE-only controllers, and dual-mode controllers
	 * configured as LE-only devices, do not allow switching LE off.
	 * These have either LE enabled explicitly or BR/EDR has been
	 * previously switched off.
	 *
	 * When trying to enable an already enabled LE, gracefully send a
	 * positive response. Trying to disable it, however, will result
	 * in rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int send_hci_cmd_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_hci_cmd_sync *cp = cmd->param;
	struct sk_buff *skb;

	skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cp->opcode),
				le16_to_cpu(cp->params_len), cp->params,
				cp->event, cp->timeout ?
				secs_to_jiffies(cp->timeout) :
				HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
				mgmt_status(PTR_ERR(skb)));
		goto done;
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC, 0,
			  skb->data, skb->len);

	kfree_skb(skb);

done:
	mgmt_pending_free(cmd);

	return 0;
}
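
/* A minimal usage sketch for MGMT_OP_HCI_CMD_SYNC (hypothetical client
 * values): sending opcode 0x0c14 (Read Local Name) with params_len 0,
 * event 0x0e (Command Complete) and timeout 0 lets the helper above fall
 * back to HCI_CMD_TIMEOUT, while a non-zero timeout is taken in seconds,
 * e.g. cp->timeout == 2 waits secs_to_jiffies(2) for the matching event.
 */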

static int mgmt_hci_cmd_sync(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 len)
{
	struct mgmt_cp_hci_cmd_sync *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	if (len < sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);
	cmd = mgmt_pending_new(sk, MGMT_OP_HCI_CMD_SYNC, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_hci_cmd_sync, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}

/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}

static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

static u8 get_uuid_size(const u8 *uuid)
{
	u32 val;

	if (memcmp(uuid, bluetooth_base_uuid, 12))
		return 128;

	val = get_unaligned_le32(&uuid[12]);
	if (val > 0xffff)
		return 32;

	return 16;
}
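
/* Worked example for the size detection above: the 16-bit service UUID
 * 0x110b (Audio Sink) expands to 0000110b-0000-1000-8000-00805f9b34fb.
 * In the little-endian byte order used here its first 12 bytes equal
 * bluetooth_base_uuid, and get_unaligned_le32() of the remaining four
 * bytes yields 0x0000110b <= 0xffff, so get_uuid_size() returns 16. A
 * random vendor UUID fails the memcmp() and is reported as 128 bits.
 */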

static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(err), hdev->dev_class, 3);

	mgmt_pending_free(cmd);
}

static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_update_class_sync(hdev);
	if (err)
		return err;

	return hci_update_eir_sync(hdev);
}

static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* MGMT_OP_ADD_UUID doesn't require the adapter to be up/running,
	 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static bool enable_service_cache(struct hci_dev *hdev)
{
	if (!hdev_is_powered(hdev))
		return false;

	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
				   CACHE_TIMEOUT);
		return true;
	}

	return false;
}

static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_update_class_sync(hdev);
	if (err)
		return err;

	return hci_update_eir_sync(hdev);
}

static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_REMOVE_UUID doesn't require the adapter to be up/running,
	 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_class_sync(struct hci_dev *hdev, void *data)
{
	int err = 0;

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		cancel_delayed_work_sync(&hdev->service_cache);
		err = hci_update_eir_sync(hdev);
	}

	if (err)
		return err;

	return hci_update_class_sync(hdev);
}

static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_SET_DEV_CLASS doesn't require the adapter to be up/running,
	 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		if (key->addr.type != BDADDR_BREDR) {
			bt_dev_warn(hdev,
				    "Invalid link address type %u for %pMR",
				    key->addr.type, &key->addr.bdaddr);
			continue;
		}

		if (key->type > 0x08) {
			bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
				    key->type, &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
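
/* Worked example for the length validation above (hypothetical count):
 * with key_count == 2, struct_size(cp, keys, 2) evaluates to
 * sizeof(*cp) + 2 * sizeof(struct mgmt_link_key_info), so a command
 * payload that is even one byte short or long is rejected with
 * MGMT_STATUS_INVALID_PARAMS before any key is touched.
 */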

static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}

static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	if (!err)
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, err);
	mgmt_pending_free(cmd);
}

static int unpair_device_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct hci_conn *conn;

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn)
		return 0;

	/* Disregard any possible error since the likes of hci_abort_conn_sync
	 * will clean up the connection no matter the error.
	 */
	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

	return 0;
}

static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}

	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if re-pairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	cmd->cmd_complete(cmd, mgmt_status(err));
	mgmt_pending_free(cmd);
}

static int disconnect_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_disconnect *cp = cmd->param;
	struct hci_conn *conn;

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn)
		return -ENOTCONN;

	/* Disregard any possible error since the likes of hci_abort_conn_sync
	 * will clean up the connection no matter the error.
	 */
	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

	return 0;
}

static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
				 disconnect_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	switch (link_type) {
	case ISO_LINK:
	case LE_LINK:
		switch (addr_type) {
		case ADDR_LE_DEV_PUBLIC:
			return BDADDR_LE_PUBLIC;

		default:
			/* Fallback to LE Random address type */
			return BDADDR_LE_RANDOM;
		}

	default:
		/* Fallback to BR/EDR type */
		return BDADDR_BREDR;
	}
}
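
/* The mapping above, written out (sketch):
 *
 *	link_type		addr_type		result
 *	LE_LINK/ISO_LINK	ADDR_LE_DEV_PUBLIC	BDADDR_LE_PUBLIC
 *	LE_LINK/ISO_LINK	anything else		BDADDR_LE_RANDOM
 *	ACL_LINK/SCO_LINK	(ignored)		BDADDR_BREDR
 */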

static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
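
/* Sketch of the two-pass sizing used above: the first walk only counts
 * mgmt-visible connections so struct_size(rp, addr, i) can allocate a
 * tight reply, and the second walk fills the array while skipping
 * SCO/eSCO links. Because that skip happens after the count, the final
 * struct_size(rp, addr, i) passed to mgmt_cmd_complete() may be smaller
 * than the allocation, never larger.
 */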

static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}

static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}

static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
			continue;

		if (cmd->user_data != conn)
			continue;

		return cmd;
	}

	return NULL;
}

static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}

void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
	struct mgmt_pending_cmd *cmd;

	cmd = find_pairing(conn);
	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE,
				       HCI_ACL_CONN_TIMEOUT);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the peripheral preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			err = -EIO;
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = hci_conn_get(conn);

	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since the user doesn't want to proceed with the connection, abort
	 * any ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}

static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}

static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}

static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}

static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}

static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3822
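/* If the current advertising instance advertises data that depends on
 * one of the given flags, cancel its timeout and move on to the next
 * instance so that the updated value is picked up.
 */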
static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return 0;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return 0;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return 0;

	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);

	return 0;
}

static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}

static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}

static int set_name_sync(struct hci_dev *hdev, void *data)
{
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}

static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}

static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != appearance) {
		hdev->appearance = appearance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}

static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_configuration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}

int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}

static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}

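/* Translate the selected MGMT PHY bits into an HCI LE Set Default PHY
 * command. Per that command's definition, all_phys bit 0 (no TX
 * preference) and bit 1 (no RX preference) are set whenever no TX or RX
 * PHYs were selected at all.
 */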
static int set_default_phy_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
	struct hci_cp_le_set_default_phy cp_phy;
	u32 selected_phys = __le32_to_cpu(cp->selected_phys);

	memset(&cp_phy, 0, sizeof(cp_phy));

	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
				  sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);

	return 0;
}

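/* The BR/EDR part of the selection is applied by rewriting
 * hdev->pkt_type: the DM/DH bits enable a packet type when set, while
 * the EDR bits (HCI_2DHx/HCI_3DHx) are "shall not be used" bits and
 * therefore have inverted polarity. The LE part is queued as an HCI
 * command via set_default_phy_sync().
 */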
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

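/* Replace the list of blocked (known-bad) keys wholesale: the old list
 * is cleared and the provided entries are added under the device lock.
 * A failed allocation aborts the loop and is reported as NO_RESOURCES,
 * keeping the entries added so far.
 */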
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				 err, NULL, 0);
}

static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

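/* Build the Read Controller Capabilities response as a sequence of
 * EIR-encoded entries: security flags, the maximum encryption key
 * sizes and, when the controller can report it, the LE TX power range.
 */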
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);
	/* When the Read Simple Pairing Options command is supported, the
	 * maximum encryption key size is also provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * them from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}

#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};

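/* Enumerate the experimental features that apply to this index (or to
 * the non-controller index when hdev is NULL), report each feature's
 * enabled flag and opt the socket in to future Experimental Feature
 * Changed events.
 */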
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_exp_features_info *rp;
	size_t len;
	u16 idx = 0;
	u32 flags;
	int status;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enough space for 7 features */
	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
	rp = kzalloc(len, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (IS_ENABLED(CONFIG_BT_LE)) {
		flags = iso_enabled() ? BIT(0) : 0;
		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && lmp_le_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				   MGMT_OP_READ_EXP_FEATURES_INFO,
				   0, rp, sizeof(*rp) + (20 * idx));

	kfree(rp);
	return status;
}

static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
			       bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, uuid, 16);
	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}

#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}

/* The zero key uuid is special. Multiple exp features are set through it. */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}

#ifdef CONFIG_BT_FEATURE_DEBUG
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* The command is only valid on the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif

static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* The command is only valid on a controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_MESH_EXPERIMENTAL);
	} else {
		hci_dev_clear_flag(hdev, HCI_MESH);
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_MESH_EXPERIMENTAL);
	}

	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);

	return err;
}

static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* The command is only valid on a controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}

static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* The command is only valid on a controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	if (!hdev->get_data_path_id) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);

	return err;
}

static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* The command is only valid on a controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));

	if (!hci_dev_le_state_simultaneous(hdev)) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);

	return err;
}

#ifdef CONFIG_BT_LE
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed = false;
	int err;

	/* The command is only valid on the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	if (val)
		err = iso_init();
	else
		err = iso_exit();

	if (!err)
		changed = true;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, val, sk);

	return err;
}
#endif

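/* Table mapping experimental feature UUIDs to their setters. The
 * all-zeroes UUID comes first and a NULL entry terminates the lookup
 * done in set_exp_feature().
 */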
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};

static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	size_t i = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	for (i = 0; exp_features[i].uuid; i++) {
		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
			return exp_features[i].set_func(sk, hdev, cp, data_len);
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}

static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	supported_flags = hdev->conn_flags;

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (!params)
			goto done;

		current_flags = params->flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				 &rp, sizeof(rp));
}

static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = bdaddr_type;
	ev.supported_flags = cpu_to_le32(supported_flags);
	ev.current_flags = cpu_to_le32(current_flags);

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}

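/* Validate the requested flags against hdev->conn_flags and store them
 * in the matching accept-list entry (BR/EDR) or connection parameters
 * (LE). A successful update is broadcast as a Device Flags Changed
 * event.
 */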
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* conn_flags is read without hci_dev_lock() here for the early
	 * validation; since it can change, it is re-read under the lock
	 * below for the LE path.
	 */
	supported_flags = hdev->conn_flags;

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
		goto unlock;
	}

	supported_flags = hdev->conn_flags;

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	WRITE_ONCE(params->flags, current_flags);
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}

static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
				   u16 handle)
{
	struct mgmt_ev_adv_monitor_added ev;

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}

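/* Report the advertisement monitor features: the supported matching
 * mechanisms (currently OR-patterns, when the MSFT extension is
 * available), the handle and pattern limits, and the handles of all
 * currently registered monitors.
 */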
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}

static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}

static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	return hci_add_adv_monitor(hdev, monitor);
}

static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	if (status)
		goto unlock;

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}

static void parse_adv_monitor_rssi(struct adv_monitor *m,
				   struct mgmt_adv_rssi_thresholds *rssi)
{
	if (rssi) {
		m->rssi.low_threshold = rssi->low_threshold;
		m->rssi.low_threshold_timeout =
		    __le16_to_cpu(rssi->low_threshold_timeout);
		m->rssi.high_threshold = rssi->high_threshold;
		m->rssi.high_threshold_timeout =
		    __le16_to_cpu(rssi->high_threshold_timeout);
		m->rssi.sampling_period = rssi->sampling_period;
	} else {
		/* Default values. These numbers are the least constricting
		 * parameters for the MSFT API to work, so it behaves as if
		 * there were no RSSI parameters to consider. They may need
		 * to be changed if other APIs are to be supported.
		 */
		m->rssi.low_threshold = -127;
		m->rssi.low_threshold_timeout = 60;
		m->rssi.high_threshold = -127;
		m->rssi.high_threshold_timeout = 0;
		m->rssi.sampling_period = 0;
	}
}

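/* Copy the user-supplied patterns into the monitor, bounds-checking
 * each offset/length pair against HCI_MAX_EXT_AD_LENGTH. Returns an
 * MGMT status code; on failure the caller frees the monitor together
 * with any patterns added so far.
 */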
static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
				    struct mgmt_adv_pattern *patterns)
{
	u8 offset = 0, length = 0;
	struct adv_pattern *p = NULL;
	int i;

	for (i = 0; i < pattern_count; i++) {
		offset = patterns[i].offset;
		length = patterns[i].length;
		if (offset >= HCI_MAX_EXT_AD_LENGTH ||
		    length > HCI_MAX_EXT_AD_LENGTH ||
		    (offset + length) > HCI_MAX_EXT_AD_LENGTH)
			return MGMT_STATUS_INVALID_PARAMS;

		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return MGMT_STATUS_NO_RESOURCES;

		p->ad_type = patterns[i].ad_type;
		p->offset = patterns[i].offset;
		p->length = patterns[i].length;
		memcpy(p->value, patterns[i].value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	return MGMT_STATUS_SUCCESS;
}

static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}

static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}

static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp;

	if (status == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev))
		return;

	hci_dev_lock(hdev);

	cp = cmd->param;

	rp.monitor_handle = cp->monitor_handle;

	if (!status)
		hci_update_passive_scan(hdev);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}

static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;

	if (cmd != pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev))
		return -ECANCELED;

	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
	u16 handle = __le16_to_cpu(cp->monitor_handle);

	if (!handle)
		return hci_remove_all_adv_monitor(hdev);

	return hci_remove_single_adv_monitor(hdev, handle);
}

static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err, status;

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
				  mgmt_remove_adv_monitor_complete);

	if (err) {
		mgmt_pending_remove(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}

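/* Completion handler for Read Local OOB Data. Depending on whether
 * BR/EDR Secure Connections is enabled, the controller reply carries
 * either the P-192 hash/randomizer pair alone or both the P-192 and
 * P-256 pairs, and the MGMT response is sized accordingly.
 */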
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}

static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;

	if (bredr_sc_enabled(hdev))
		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
	else
		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);

	if (IS_ERR(cmd->skb))
		return PTR_ERR(cmd->skb);
	else
		return 0;
}

static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

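/* Store remote OOB pairing data. Two request sizes are accepted: the
 * legacy form carrying a single P-192 hash/randomizer (BR/EDR only)
 * and the extended form carrying both P-192 and P-256 values, where an
 * all-zero pair disables OOB data for that curve.
 */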
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5715 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5716 void *data, u16 len)
5717 {
5718 struct mgmt_cp_remove_remote_oob_data *cp = data;
5719 u8 status;
5720 int err;
5721
5722 bt_dev_dbg(hdev, "sock %p", sk);
5723
5724 if (cp->addr.type != BDADDR_BREDR)
5725 return mgmt_cmd_complete(sk, hdev->id,
5726 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5727 MGMT_STATUS_INVALID_PARAMS,
5728 &cp->addr, sizeof(cp->addr));
5729
5730 hci_dev_lock(hdev);
5731
5732 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5733 hci_remote_oob_data_clear(hdev);
5734 status = MGMT_STATUS_SUCCESS;
5735 goto done;
5736 }
5737
5738 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5739 if (err < 0)
5740 status = MGMT_STATUS_INVALID_PARAMS;
5741 else
5742 status = MGMT_STATUS_SUCCESS;
5743
5744 done:
5745 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5746 status, &cp->addr, sizeof(cp->addr));
5747
5748 hci_dev_unlock(hdev);
5749 return err;
5750 }
5751
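/* Validate the requested discovery type against the controller's
 * capabilities. Note the deliberate fallthrough: DISCOV_TYPE_INTERLEAVED
 * first requires LE support and then falls into the BR/EDR check, so an
 * interleaved scan is only valid on dual-mode controllers. On failure
 * *mgmt_status carries the status code to return to user space.
 */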
5752 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5753 uint8_t *mgmt_status)
5754 {
5755 switch (type) {
5756 case DISCOV_TYPE_LE:
5757 *mgmt_status = mgmt_le_support(hdev);
5758 if (*mgmt_status)
5759 return false;
5760 break;
5761 case DISCOV_TYPE_INTERLEAVED:
5762 *mgmt_status = mgmt_le_support(hdev);
5763 if (*mgmt_status)
5764 return false;
5765 fallthrough;
5766 case DISCOV_TYPE_BREDR:
5767 *mgmt_status = mgmt_bredr_support(hdev);
5768 if (*mgmt_status)
5769 return false;
5770 break;
5771 default:
5772 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5773 return false;
5774 }
5775
5776 return true;
5777 }
5778
5779 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5780 {
5781 struct mgmt_pending_cmd *cmd = data;
5782
5783 bt_dev_dbg(hdev, "err %d", err);
5784
5785 if (err == -ECANCELED)
5786 return;
5787
5788 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5789 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5790 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5791 return;
5792
5793 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5794 cmd->param, 1);
5795 mgmt_pending_remove(cmd);
5796
5797 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
5798 DISCOVERY_FINDING);
5799 }
5800
5801 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5802 {
5803 return hci_start_discovery_sync(hdev);
5804 }
5805
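/* Common handler for Start Discovery, Start Limited Discovery and Start
 * Service Discovery. The checks run in a fixed order: powered, no
 * discovery (or periodic inquiry) already active, valid discovery type,
 * and discovery not paused; only then is the request queued and the
 * state moved to DISCOVERY_STARTING.
 */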
5806 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5807 u16 op, void *data, u16 len)
5808 {
5809 struct mgmt_cp_start_discovery *cp = data;
5810 struct mgmt_pending_cmd *cmd;
5811 u8 status;
5812 int err;
5813
5814 bt_dev_dbg(hdev, "sock %p", sk);
5815
5816 hci_dev_lock(hdev);
5817
5818 if (!hdev_is_powered(hdev)) {
5819 err = mgmt_cmd_complete(sk, hdev->id, op,
5820 MGMT_STATUS_NOT_POWERED,
5821 &cp->type, sizeof(cp->type));
5822 goto failed;
5823 }
5824
5825 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5826 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5827 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5828 &cp->type, sizeof(cp->type));
5829 goto failed;
5830 }
5831
5832 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5833 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5834 &cp->type, sizeof(cp->type));
5835 goto failed;
5836 }
5837
5838 /* Can't start discovery when it is paused */
5839 if (hdev->discovery_paused) {
5840 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5841 &cp->type, sizeof(cp->type));
5842 goto failed;
5843 }
5844
5845 /* Clear the discovery filter first to free any previously
5846 * allocated memory for the UUID list.
5847 */
5848 hci_discovery_filter_clear(hdev);
5849
5850 hdev->discovery.type = cp->type;
5851 hdev->discovery.report_invalid_rssi = false;
5852 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5853 hdev->discovery.limited = true;
5854 else
5855 hdev->discovery.limited = false;
5856
5857 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5858 if (!cmd) {
5859 err = -ENOMEM;
5860 goto failed;
5861 }
5862
5863 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5864 start_discovery_complete);
5865 if (err < 0) {
5866 mgmt_pending_remove(cmd);
5867 goto failed;
5868 }
5869
5870 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5871
5872 failed:
5873 hci_dev_unlock(hdev);
5874 return err;
5875 }
5876
5877 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5878 void *data, u16 len)
5879 {
5880 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5881 data, len);
5882 }
5883
5884 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5885 void *data, u16 len)
5886 {
5887 return start_discovery_internal(sk, hdev,
5888 MGMT_OP_START_LIMITED_DISCOVERY,
5889 data, len);
5890 }
5891
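/* Start Service Discovery carries a variable-length list of 128-bit
 * UUID filters, so the total length must be exactly
 * sizeof(*cp) + uuid_count * 16. The max_uuid_count bound is what keeps
 * that u16 multiplication from wrapping: anything larger than
 * (U16_MAX - sizeof(*cp)) / 16 could alias a smaller expected_len and
 * defeat the length check.
 */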
5892 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5893 void *data, u16 len)
5894 {
5895 struct mgmt_cp_start_service_discovery *cp = data;
5896 struct mgmt_pending_cmd *cmd;
5897 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5898 u16 uuid_count, expected_len;
5899 u8 status;
5900 int err;
5901
5902 bt_dev_dbg(hdev, "sock %p", sk);
5903
5904 hci_dev_lock(hdev);
5905
5906 if (!hdev_is_powered(hdev)) {
5907 err = mgmt_cmd_complete(sk, hdev->id,
5908 MGMT_OP_START_SERVICE_DISCOVERY,
5909 MGMT_STATUS_NOT_POWERED,
5910 &cp->type, sizeof(cp->type));
5911 goto failed;
5912 }
5913
5914 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5915 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5916 err = mgmt_cmd_complete(sk, hdev->id,
5917 MGMT_OP_START_SERVICE_DISCOVERY,
5918 MGMT_STATUS_BUSY, &cp->type,
5919 sizeof(cp->type));
5920 goto failed;
5921 }
5922
5923 if (hdev->discovery_paused) {
5924 err = mgmt_cmd_complete(sk, hdev->id,
5925 MGMT_OP_START_SERVICE_DISCOVERY,
5926 MGMT_STATUS_BUSY, &cp->type,
5927 sizeof(cp->type));
5928 goto failed;
5929 }
5930
5931 uuid_count = __le16_to_cpu(cp->uuid_count);
5932 if (uuid_count > max_uuid_count) {
5933 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5934 uuid_count);
5935 err = mgmt_cmd_complete(sk, hdev->id,
5936 MGMT_OP_START_SERVICE_DISCOVERY,
5937 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5938 sizeof(cp->type));
5939 goto failed;
5940 }
5941
5942 expected_len = sizeof(*cp) + uuid_count * 16;
5943 if (expected_len != len) {
5944 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5945 expected_len, len);
5946 err = mgmt_cmd_complete(sk, hdev->id,
5947 MGMT_OP_START_SERVICE_DISCOVERY,
5948 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5949 sizeof(cp->type));
5950 goto failed;
5951 }
5952
5953 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5954 err = mgmt_cmd_complete(sk, hdev->id,
5955 MGMT_OP_START_SERVICE_DISCOVERY,
5956 status, &cp->type, sizeof(cp->type));
5957 goto failed;
5958 }
5959
5960 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
5961 hdev, data, len);
5962 if (!cmd) {
5963 err = -ENOMEM;
5964 goto failed;
5965 }
5966
5967 /* Clear the discovery filter first to free any previously
5968 * allocated memory for the UUID list.
5969 */
5970 hci_discovery_filter_clear(hdev);
5971
5972 hdev->discovery.result_filtering = true;
5973 hdev->discovery.type = cp->type;
5974 hdev->discovery.rssi = cp->rssi;
5975 hdev->discovery.uuid_count = uuid_count;
5976
5977 if (uuid_count > 0) {
5978 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
5979 GFP_KERNEL);
5980 if (!hdev->discovery.uuids) {
5981 err = mgmt_cmd_complete(sk, hdev->id,
5982 MGMT_OP_START_SERVICE_DISCOVERY,
5983 MGMT_STATUS_FAILED,
5984 &cp->type, sizeof(cp->type));
5985 mgmt_pending_remove(cmd);
5986 goto failed;
5987 }
5988 }
5989
5990 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5991 start_discovery_complete);
5992 if (err < 0) {
5993 mgmt_pending_remove(cmd);
5994 goto failed;
5995 }
5996
5997 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5998
5999 failed:
6000 hci_dev_unlock(hdev);
6001 return err;
6002 }
6003
6004 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6005 {
6006 struct mgmt_pending_cmd *cmd = data;
6007
6008 if (err == -ECANCELED ||
6009 cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6010 return;
6011
6012 bt_dev_dbg(hdev, "err %d", err);
6013
6014 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6015 cmd->param, 1);
6016 mgmt_pending_remove(cmd);
6017
6018 if (!err)
6019 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6020 }
6021
6022 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6023 {
6024 return hci_stop_discovery_sync(hdev);
6025 }
6026
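/* Stop Discovery is only accepted while discovery is active and only
 * for the same type that was originally started; anything else is
 * rejected without touching the controller.
 */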
6027 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6028 u16 len)
6029 {
6030 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6031 struct mgmt_pending_cmd *cmd;
6032 int err;
6033
6034 bt_dev_dbg(hdev, "sock %p", sk);
6035
6036 hci_dev_lock(hdev);
6037
6038 if (!hci_discovery_active(hdev)) {
6039 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6040 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6041 sizeof(mgmt_cp->type));
6042 goto unlock;
6043 }
6044
6045 if (hdev->discovery.type != mgmt_cp->type) {
6046 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6047 MGMT_STATUS_INVALID_PARAMS,
6048 &mgmt_cp->type, sizeof(mgmt_cp->type));
6049 goto unlock;
6050 }
6051
6052 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6053 if (!cmd) {
6054 err = -ENOMEM;
6055 goto unlock;
6056 }
6057
6058 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6059 stop_discovery_complete);
6060 if (err < 0) {
6061 mgmt_pending_remove(cmd);
6062 goto unlock;
6063 }
6064
6065 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6066
6067 unlock:
6068 hci_dev_unlock(hdev);
6069 return err;
6070 }
6071
6072 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6073 u16 len)
6074 {
6075 struct mgmt_cp_confirm_name *cp = data;
6076 struct inquiry_entry *e;
6077 int err;
6078
6079 bt_dev_dbg(hdev, "sock %p", sk);
6080
6081 hci_dev_lock(hdev);
6082
6083 if (!hci_discovery_active(hdev)) {
6084 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6085 MGMT_STATUS_FAILED, &cp->addr,
6086 sizeof(cp->addr));
6087 goto failed;
6088 }
6089
6090 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6091 if (!e) {
6092 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6093 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6094 sizeof(cp->addr));
6095 goto failed;
6096 }
6097
6098 if (cp->name_known) {
6099 e->name_state = NAME_KNOWN;
6100 list_del(&e->list);
6101 } else {
6102 e->name_state = NAME_NEEDED;
6103 hci_inquiry_cache_update_resolve(hdev, e);
6104 }
6105
6106 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6107 &cp->addr, sizeof(cp->addr));
6108
6109 failed:
6110 hci_dev_unlock(hdev);
6111 return err;
6112 }
6113
6114 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6115 u16 len)
6116 {
6117 struct mgmt_cp_block_device *cp = data;
6118 u8 status;
6119 int err;
6120
6121 bt_dev_dbg(hdev, "sock %p", sk);
6122
6123 if (!bdaddr_type_is_valid(cp->addr.type))
6124 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6125 MGMT_STATUS_INVALID_PARAMS,
6126 &cp->addr, sizeof(cp->addr));
6127
6128 hci_dev_lock(hdev);
6129
6130 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6131 cp->addr.type);
6132 if (err < 0) {
6133 status = MGMT_STATUS_FAILED;
6134 goto done;
6135 }
6136
6137 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6138 sk);
6139 status = MGMT_STATUS_SUCCESS;
6140
6141 done:
6142 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6143 &cp->addr, sizeof(cp->addr));
6144
6145 hci_dev_unlock(hdev);
6146
6147 return err;
6148 }
6149
6150 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6151 u16 len)
6152 {
6153 struct mgmt_cp_unblock_device *cp = data;
6154 u8 status;
6155 int err;
6156
6157 bt_dev_dbg(hdev, "sock %p", sk);
6158
6159 if (!bdaddr_type_is_valid(cp->addr.type))
6160 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6161 MGMT_STATUS_INVALID_PARAMS,
6162 &cp->addr, sizeof(cp->addr));
6163
6164 hci_dev_lock(hdev);
6165
6166 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6167 cp->addr.type);
6168 if (err < 0) {
6169 status = MGMT_STATUS_INVALID_PARAMS;
6170 goto done;
6171 }
6172
6173 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6174 sk);
6175 status = MGMT_STATUS_SUCCESS;
6176
6177 done:
6178 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6179 &cp->addr, sizeof(cp->addr));
6180
6181 hci_dev_unlock(hdev);
6182
6183 return err;
6184 }
6185
6186 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6187 {
6188 return hci_update_eir_sync(hdev);
6189 }
6190
6191 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6192 u16 len)
6193 {
6194 struct mgmt_cp_set_device_id *cp = data;
6195 int err;
6196 __u16 source;
6197
6198 bt_dev_dbg(hdev, "sock %p", sk);
6199
6200 source = __le16_to_cpu(cp->source);
6201
6202 if (source > 0x0002)
6203 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6204 MGMT_STATUS_INVALID_PARAMS);
6205
6206 hci_dev_lock(hdev);
6207
6208 hdev->devid_source = source;
6209 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6210 hdev->devid_product = __le16_to_cpu(cp->product);
6211 hdev->devid_version = __le16_to_cpu(cp->version);
6212
6213 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6214 NULL, 0);
6215
6216 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6217
6218 hci_dev_unlock(hdev);
6219
6220 return err;
6221 }
6222
6223 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6224 {
6225 if (err)
6226 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6227 else
6228 bt_dev_dbg(hdev, "status %d", err);
6229 }
6230
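/* Completion for Set Advertising: mirror the controller state
 * (HCI_LE_ADV) into the HCI_ADVERTISING setting, answer all pending
 * commands, and if the setting was just switched off while advertising
 * instances still exist, reschedule the current (or first) instance so
 * multi-instance advertising resumes.
 */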
6231 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6232 {
6233 struct cmd_lookup match = { NULL, hdev };
6234 u8 instance;
6235 struct adv_info *adv_instance;
6236 u8 status = mgmt_status(err);
6237
6238 if (status) {
6239 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6240 cmd_status_rsp, &status);
6241 return;
6242 }
6243
6244 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6245 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6246 else
6247 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6248
6249 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6250 &match);
6251
6252 new_settings(hdev, match.sk);
6253
6254 if (match.sk)
6255 sock_put(match.sk);
6256
6257 /* If "Set Advertising" was just disabled and instance advertising was
6258 * set up earlier, then re-enable multi-instance advertising.
6259 */
6260 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6261 list_empty(&hdev->adv_instances))
6262 return;
6263
6264 instance = hdev->cur_adv_instance;
6265 if (!instance) {
6266 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6267 struct adv_info, list);
6268 if (!adv_instance)
6269 return;
6270
6271 instance = adv_instance->instance;
6272 }
6273
6274 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6275
6276 enable_advertising_instance(hdev, err);
6277 }
6278
6279 static int set_adv_sync(struct hci_dev *hdev, void *data)
6280 {
6281 struct mgmt_pending_cmd *cmd = data;
6282 struct mgmt_mode *cp = cmd->param;
6283 u8 val = !!cp->val;
6284
6285 if (cp->val == 0x02)
6286 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6287 else
6288 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6289
6290 cancel_adv_timeout(hdev);
6291
6292 if (val) {
6293 /* Switch to instance "0" for the Set Advertising setting.
6294 * We cannot use update_[adv|scan_rsp]_data() here as the
6295 * HCI_ADVERTISING flag is not yet set.
6296 */
6297 hdev->cur_adv_instance = 0x00;
6298
6299 if (ext_adv_capable(hdev)) {
6300 hci_start_ext_adv_sync(hdev, 0x00);
6301 } else {
6302 hci_update_adv_data_sync(hdev, 0x00);
6303 hci_update_scan_rsp_data_sync(hdev, 0x00);
6304 hci_enable_advertising_sync(hdev);
6305 }
6306 } else {
6307 hci_disable_advertising_sync(hdev);
6308 }
6309
6310 return 0;
6311 }
6312
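/* Set Advertising accepts three values: 0x00 (off), 0x01 (on) and, as
 * documented in BlueZ's mgmt API, 0x02 (on and connectable, tracked via
 * the HCI_ADVERTISING_CONNECTABLE flag). When no HCI traffic is needed
 * (powered off, unchanged state, mesh enabled, an LE link is up, or an
 * active LE scan is running) the flags are toggled directly and only a
 * settings response is sent.
 */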
6313 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6314 u16 len)
6315 {
6316 struct mgmt_mode *cp = data;
6317 struct mgmt_pending_cmd *cmd;
6318 u8 val, status;
6319 int err;
6320
6321 bt_dev_dbg(hdev, "sock %p", sk);
6322
6323 status = mgmt_le_support(hdev);
6324 if (status)
6325 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6326 status);
6327
6328 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6329 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6330 MGMT_STATUS_INVALID_PARAMS);
6331
6332 if (hdev->advertising_paused)
6333 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6334 MGMT_STATUS_BUSY);
6335
6336 hci_dev_lock(hdev);
6337
6338 val = !!cp->val;
6339
6340 /* The following conditions are ones which mean that we should
6341 * not do any HCI communication but directly send a mgmt
6342 * response to user space (after toggling the flag if
6343 * necessary).
6344 */
6345 if (!hdev_is_powered(hdev) ||
6346 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6347 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6348 hci_dev_test_flag(hdev, HCI_MESH) ||
6349 hci_conn_num(hdev, LE_LINK) > 0 ||
6350 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6351 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6352 bool changed;
6353
6354 if (cp->val) {
6355 hdev->cur_adv_instance = 0x00;
6356 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6357 if (cp->val == 0x02)
6358 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6359 else
6360 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6361 } else {
6362 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6363 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6364 }
6365
6366 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6367 if (err < 0)
6368 goto unlock;
6369
6370 if (changed)
6371 err = new_settings(hdev, sk);
6372
6373 goto unlock;
6374 }
6375
6376 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6377 pending_find(MGMT_OP_SET_LE, hdev)) {
6378 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6379 MGMT_STATUS_BUSY);
6380 goto unlock;
6381 }
6382
6383 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6384 if (!cmd)
6385 err = -ENOMEM;
6386 else
6387 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6388 set_advertising_complete);
6389
6390 if (err < 0 && cmd)
6391 mgmt_pending_remove(cmd);
6392
6393 unlock:
6394 hci_dev_unlock(hdev);
6395 return err;
6396 }
6397
6398 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6399 void *data, u16 len)
6400 {
6401 struct mgmt_cp_set_static_address *cp = data;
6402 int err;
6403
6404 bt_dev_dbg(hdev, "sock %p", sk);
6405
6406 if (!lmp_le_capable(hdev))
6407 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6408 MGMT_STATUS_NOT_SUPPORTED);
6409
6410 if (hdev_is_powered(hdev))
6411 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6412 MGMT_STATUS_REJECTED);
6413
6414 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6415 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6416 return mgmt_cmd_status(sk, hdev->id,
6417 MGMT_OP_SET_STATIC_ADDRESS,
6418 MGMT_STATUS_INVALID_PARAMS);
6419
6420 /* Two most significant bits shall be set */
6421 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6422 return mgmt_cmd_status(sk, hdev->id,
6423 MGMT_OP_SET_STATIC_ADDRESS,
6424 MGMT_STATUS_INVALID_PARAMS);
6425 }
6426
6427 hci_dev_lock(hdev);
6428
6429 bacpy(&hdev->static_addr, &cp->bdaddr);
6430
6431 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6432 if (err < 0)
6433 goto unlock;
6434
6435 err = new_settings(hdev, sk);
6436
6437 unlock:
6438 hci_dev_unlock(hdev);
6439 return err;
6440 }
6441
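/* Scan interval and window are in units of 0.625 ms per the Core
 * Specification, so the accepted 0x0004-0x4000 range spans 2.5 ms to
 * 10.24 s; e.g. interval = 0x0010 is 16 * 0.625 = 10 ms. The window
 * must not exceed the interval, since scanning can at most be
 * continuous.
 */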
6442 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6443 void *data, u16 len)
6444 {
6445 struct mgmt_cp_set_scan_params *cp = data;
6446 __u16 interval, window;
6447 int err;
6448
6449 bt_dev_dbg(hdev, "sock %p", sk);
6450
6451 if (!lmp_le_capable(hdev))
6452 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6453 MGMT_STATUS_NOT_SUPPORTED);
6454
6455 interval = __le16_to_cpu(cp->interval);
6456
6457 if (interval < 0x0004 || interval > 0x4000)
6458 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6459 MGMT_STATUS_INVALID_PARAMS);
6460
6461 window = __le16_to_cpu(cp->window);
6462
6463 if (window < 0x0004 || window > 0x4000)
6464 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6465 MGMT_STATUS_INVALID_PARAMS);
6466
6467 if (window > interval)
6468 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6469 MGMT_STATUS_INVALID_PARAMS);
6470
6471 hci_dev_lock(hdev);
6472
6473 hdev->le_scan_interval = interval;
6474 hdev->le_scan_window = window;
6475
6476 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6477 NULL, 0);
6478
6479 /* If background scan is running, restart it so new parameters are
6480 * loaded.
6481 */
6482 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6483 hdev->discovery.state == DISCOVERY_STOPPED)
6484 hci_update_passive_scan(hdev);
6485
6486 hci_dev_unlock(hdev);
6487
6488 return err;
6489 }
6490
6491 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6492 {
6493 struct mgmt_pending_cmd *cmd = data;
6494
6495 bt_dev_dbg(hdev, "err %d", err);
6496
6497 if (err) {
6498 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6499 mgmt_status(err));
6500 } else {
6501 struct mgmt_mode *cp = cmd->param;
6502
6503 if (cp->val)
6504 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6505 else
6506 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6507
6508 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6509 new_settings(hdev, cmd->sk);
6510 }
6511
6512 mgmt_pending_free(cmd);
6513 }
6514
6515 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6516 {
6517 struct mgmt_pending_cmd *cmd = data;
6518 struct mgmt_mode *cp = cmd->param;
6519
6520 return hci_write_fast_connectable_sync(hdev, cp->val);
6521 }
6522
6523 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6524 void *data, u16 len)
6525 {
6526 struct mgmt_mode *cp = data;
6527 struct mgmt_pending_cmd *cmd;
6528 int err;
6529
6530 bt_dev_dbg(hdev, "sock %p", sk);
6531
6532 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6533 hdev->hci_ver < BLUETOOTH_VER_1_2)
6534 return mgmt_cmd_status(sk, hdev->id,
6535 MGMT_OP_SET_FAST_CONNECTABLE,
6536 MGMT_STATUS_NOT_SUPPORTED);
6537
6538 if (cp->val != 0x00 && cp->val != 0x01)
6539 return mgmt_cmd_status(sk, hdev->id,
6540 MGMT_OP_SET_FAST_CONNECTABLE,
6541 MGMT_STATUS_INVALID_PARAMS);
6542
6543 hci_dev_lock(hdev);
6544
6545 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6546 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6547 goto unlock;
6548 }
6549
6550 if (!hdev_is_powered(hdev)) {
6551 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6552 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6553 new_settings(hdev, sk);
6554 goto unlock;
6555 }
6556
6557 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6558 len);
6559 if (!cmd)
6560 err = -ENOMEM;
6561 else
6562 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6563 fast_connectable_complete);
6564
6565 if (err < 0) {
6566 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6567 MGMT_STATUS_FAILED);
6568
6569 if (cmd)
6570 mgmt_pending_free(cmd);
6571 }
6572
6573 unlock:
6574 hci_dev_unlock(hdev);
6575
6576 return err;
6577 }
6578
6579 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6580 {
6581 struct mgmt_pending_cmd *cmd = data;
6582
6583 bt_dev_dbg(hdev, "err %d", err);
6584
6585 if (err) {
6586 u8 mgmt_err = mgmt_status(err);
6587
6588 /* We need to restore the flag if related HCI commands
6589 * failed.
6590 */
6591 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6592
6593 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6594 } else {
6595 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6596 new_settings(hdev, cmd->sk);
6597 }
6598
6599 mgmt_pending_free(cmd);
6600 }
6601
6602 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6603 {
6604 int status;
6605
6606 status = hci_write_fast_connectable_sync(hdev, false);
6607
6608 if (!status)
6609 status = hci_update_scan_sync(hdev);
6610
6611 /* Since only the advertising data flags will change, there
6612 * is no need to update the scan response data.
6613 */
6614 if (!status)
6615 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6616
6617 return status;
6618 }
6619
6620 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6621 {
6622 struct mgmt_mode *cp = data;
6623 struct mgmt_pending_cmd *cmd;
6624 int err;
6625
6626 bt_dev_dbg(hdev, "sock %p", sk);
6627
6628 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6629 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6630 MGMT_STATUS_NOT_SUPPORTED);
6631
6632 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6633 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6634 MGMT_STATUS_REJECTED);
6635
6636 if (cp->val != 0x00 && cp->val != 0x01)
6637 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6638 MGMT_STATUS_INVALID_PARAMS);
6639
6640 hci_dev_lock(hdev);
6641
6642 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6643 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6644 goto unlock;
6645 }
6646
6647 if (!hdev_is_powered(hdev)) {
6648 if (!cp->val) {
6649 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6650 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6651 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6652 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6653 }
6654
6655 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6656
6657 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6658 if (err < 0)
6659 goto unlock;
6660
6661 err = new_settings(hdev, sk);
6662 goto unlock;
6663 }
6664
6665 /* Reject disabling when powered on */
6666 if (!cp->val) {
6667 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6668 MGMT_STATUS_REJECTED);
6669 goto unlock;
6670 } else {
6671 /* When configuring a dual-mode controller to operate
6672 * with LE only and using a static address, then switching
6673 * BR/EDR back on is not allowed.
6674 *
6675 * Dual-mode controllers shall operate with the public
6676 * address as its identity address for BR/EDR and LE. So
6677 * reject the attempt to create an invalid configuration.
6678 *
6679 * The same restriction applies when secure connections
6680 * has been enabled. For BR/EDR this is a controller feature
6681 * while for LE it is a host stack feature. This means that
6682 * switching BR/EDR back on when secure connections has been
6683 * enabled is not a supported transaction.
6684 */
6685 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6686 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6687 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6688 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6689 MGMT_STATUS_REJECTED);
6690 goto unlock;
6691 }
6692 }
6693
6694 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6695 if (!cmd)
6696 err = -ENOMEM;
6697 else
6698 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6699 set_bredr_complete);
6700
6701 if (err < 0) {
6702 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6703 MGMT_STATUS_FAILED);
6704 if (cmd)
6705 mgmt_pending_free(cmd);
6706
6707 goto unlock;
6708 }
6709
6710 /* We need to flip the bit already here so that
6711 * hci_req_update_adv_data generates the correct flags.
6712 */
6713 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6714
6715 unlock:
6716 hci_dev_unlock(hdev);
6717 return err;
6718 }
6719
6720 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6721 {
6722 struct mgmt_pending_cmd *cmd = data;
6723 struct mgmt_mode *cp;
6724
6725 bt_dev_dbg(hdev, "err %d", err);
6726
6727 if (err) {
6728 u8 mgmt_err = mgmt_status(err);
6729
6730 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6731 goto done;
6732 }
6733
6734 cp = cmd->param;
6735
6736 switch (cp->val) {
6737 case 0x00:
6738 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6739 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6740 break;
6741 case 0x01:
6742 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6743 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6744 break;
6745 case 0x02:
6746 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6747 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6748 break;
6749 }
6750
6751 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6752 new_settings(hdev, cmd->sk);
6753
6754 done:
6755 mgmt_pending_free(cmd);
6756 }
6757
6758 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6759 {
6760 struct mgmt_pending_cmd *cmd = data;
6761 struct mgmt_mode *cp = cmd->param;
6762 u8 val = !!cp->val;
6763
6764 /* Force write of val */
6765 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6766
6767 return hci_write_sc_support_sync(hdev, val);
6768 }
6769
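/* Set Secure Connections: 0x00 disables SC, 0x01 enables it, and 0x02
 * additionally enables SC Only mode (HCI_SC_ONLY), where legacy pairing
 * is no longer acceptable. When the write cannot reach the controller
 * (powered off, no SC support, BR/EDR disabled) only the flags and the
 * settings events are updated.
 */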
6770 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6771 void *data, u16 len)
6772 {
6773 struct mgmt_mode *cp = data;
6774 struct mgmt_pending_cmd *cmd;
6775 u8 val;
6776 int err;
6777
6778 bt_dev_dbg(hdev, "sock %p", sk);
6779
6780 if (!lmp_sc_capable(hdev) &&
6781 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6782 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6783 MGMT_STATUS_NOT_SUPPORTED);
6784
6785 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6786 lmp_sc_capable(hdev) &&
6787 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6788 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6789 MGMT_STATUS_REJECTED);
6790
6791 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6792 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6793 MGMT_STATUS_INVALID_PARAMS);
6794
6795 hci_dev_lock(hdev);
6796
6797 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6798 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6799 bool changed;
6800
6801 if (cp->val) {
6802 changed = !hci_dev_test_and_set_flag(hdev,
6803 HCI_SC_ENABLED);
6804 if (cp->val == 0x02)
6805 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6806 else
6807 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6808 } else {
6809 changed = hci_dev_test_and_clear_flag(hdev,
6810 HCI_SC_ENABLED);
6811 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6812 }
6813
6814 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6815 if (err < 0)
6816 goto failed;
6817
6818 if (changed)
6819 err = new_settings(hdev, sk);
6820
6821 goto failed;
6822 }
6823
6824 val = !!cp->val;
6825
6826 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6827 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6828 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6829 goto failed;
6830 }
6831
6832 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6833 if (!cmd)
6834 err = -ENOMEM;
6835 else
6836 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6837 set_secure_conn_complete);
6838
6839 if (err < 0) {
6840 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6841 MGMT_STATUS_FAILED);
6842 if (cmd)
6843 mgmt_pending_free(cmd);
6844 }
6845
6846 failed:
6847 hci_dev_unlock(hdev);
6848 return err;
6849 }
6850
6851 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6852 void *data, u16 len)
6853 {
6854 struct mgmt_mode *cp = data;
6855 bool changed, use_changed;
6856 int err;
6857
6858 bt_dev_dbg(hdev, "sock %p", sk);
6859
6860 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6861 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6862 MGMT_STATUS_INVALID_PARAMS);
6863
6864 hci_dev_lock(hdev);
6865
6866 if (cp->val)
6867 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6868 else
6869 changed = hci_dev_test_and_clear_flag(hdev,
6870 HCI_KEEP_DEBUG_KEYS);
6871
6872 if (cp->val == 0x02)
6873 use_changed = !hci_dev_test_and_set_flag(hdev,
6874 HCI_USE_DEBUG_KEYS);
6875 else
6876 use_changed = hci_dev_test_and_clear_flag(hdev,
6877 HCI_USE_DEBUG_KEYS);
6878
6879 if (hdev_is_powered(hdev) && use_changed &&
6880 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6881 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6882 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6883 sizeof(mode), &mode);
6884 }
6885
6886 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6887 if (err < 0)
6888 goto unlock;
6889
6890 if (changed)
6891 err = new_settings(hdev, sk);
6892
6893 unlock:
6894 hci_dev_unlock(hdev);
6895 return err;
6896 }
6897
6898 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6899 u16 len)
6900 {
6901 struct mgmt_cp_set_privacy *cp = cp_data;
6902 bool changed;
6903 int err;
6904
6905 bt_dev_dbg(hdev, "sock %p", sk);
6906
6907 if (!lmp_le_capable(hdev))
6908 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6909 MGMT_STATUS_NOT_SUPPORTED);
6910
6911 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6912 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6913 MGMT_STATUS_INVALID_PARAMS);
6914
6915 if (hdev_is_powered(hdev))
6916 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6917 MGMT_STATUS_REJECTED);
6918
6919 hci_dev_lock(hdev);
6920
6921 /* If user space supports this command it is also expected to
6922 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6923 */
6924 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6925
6926 if (cp->privacy) {
6927 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6928 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
6929 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6930 hci_adv_instances_set_rpa_expired(hdev, true);
6931 if (cp->privacy == 0x02)
6932 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6933 else
6934 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6935 } else {
6936 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6937 memset(hdev->irk, 0, sizeof(hdev->irk));
6938 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6939 hci_adv_instances_set_rpa_expired(hdev, false);
6940 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6941 }
6942
6943 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
6944 if (err < 0)
6945 goto unlock;
6946
6947 if (changed)
6948 err = new_settings(hdev, sk);
6949
6950 unlock:
6951 hci_dev_unlock(hdev);
6952 return err;
6953 }
6954
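/* An IRK may be loaded against a public address or a static random
 * address. For the latter the Core Specification requires the two most
 * significant bits of the address to be 0b11, i.e. the top byte (b[5],
 * since bdaddr_t is stored little-endian) must be in the 0xC0-0xFF
 * range.
 */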
6955 static bool irk_is_valid(struct mgmt_irk_info *irk)
6956 {
6957 switch (irk->addr.type) {
6958 case BDADDR_LE_PUBLIC:
6959 return true;
6960
6961 case BDADDR_LE_RANDOM:
6962 /* Two most significant bits shall be set */
6963 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6964 return false;
6965 return true;
6966 }
6967
6968 return false;
6969 }
6970
6971 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6972 u16 len)
6973 {
6974 struct mgmt_cp_load_irks *cp = cp_data;
6975 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
6976 sizeof(struct mgmt_irk_info));
6977 u16 irk_count, expected_len;
6978 int i, err;
6979
6980 bt_dev_dbg(hdev, "sock %p", sk);
6981
6982 if (!lmp_le_capable(hdev))
6983 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6984 MGMT_STATUS_NOT_SUPPORTED);
6985
6986 irk_count = __le16_to_cpu(cp->irk_count);
6987 if (irk_count > max_irk_count) {
6988 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
6989 irk_count);
6990 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6991 MGMT_STATUS_INVALID_PARAMS);
6992 }
6993
6994 expected_len = struct_size(cp, irks, irk_count);
6995 if (expected_len != len) {
6996 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
6997 expected_len, len);
6998 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6999 MGMT_STATUS_INVALID_PARAMS);
7000 }
7001
7002 bt_dev_dbg(hdev, "irk_count %u", irk_count);
7003
7004 for (i = 0; i < irk_count; i++) {
7005 struct mgmt_irk_info *key = &cp->irks[i];
7006
7007 if (!irk_is_valid(key))
7008 return mgmt_cmd_status(sk, hdev->id,
7009 MGMT_OP_LOAD_IRKS,
7010 MGMT_STATUS_INVALID_PARAMS);
7011 }
7012
7013 hci_dev_lock(hdev);
7014
7015 hci_smp_irks_clear(hdev);
7016
7017 for (i = 0; i < irk_count; i++) {
7018 struct mgmt_irk_info *irk = &cp->irks[i];
7019
7020 if (hci_is_blocked_key(hdev,
7021 HCI_BLOCKED_KEY_TYPE_IRK,
7022 irk->val)) {
7023 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7024 &irk->addr.bdaddr);
7025 continue;
7026 }
7027
7028 hci_add_irk(hdev, &irk->addr.bdaddr,
7029 le_addr_type(irk->addr.type), irk->val,
7030 BDADDR_ANY);
7031 }
7032
7033 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7034
7035 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7036
7037 hci_dev_unlock(hdev);
7038
7039 return err;
7040 }
7041
7042 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7043 {
7044 if (key->initiator != 0x00 && key->initiator != 0x01)
7045 return false;
7046
7047 switch (key->addr.type) {
7048 case BDADDR_LE_PUBLIC:
7049 return true;
7050
7051 case BDADDR_LE_RANDOM:
7052 /* Two most significant bits shall be set */
7053 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7054 return false;
7055 return true;
7056 }
7057
7058 return false;
7059 }
7060
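/* Load Long Term Keys replaces the whole LTK store. Each entry is
 * dropped if blocked or invalid, and the mgmt key type maps onto the
 * SMP representation as follows: legacy keys become SMP_LTK (initiator)
 * or SMP_LTK_RESPONDER, P-256 keys become SMP_LTK_P256, and P-256 debug
 * keys are deliberately skipped (the fallthrough into "default:
 * continue" is intentional).
 */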
7061 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7062 void *cp_data, u16 len)
7063 {
7064 struct mgmt_cp_load_long_term_keys *cp = cp_data;
7065 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7066 sizeof(struct mgmt_ltk_info));
7067 u16 key_count, expected_len;
7068 int i, err;
7069
7070 bt_dev_dbg(hdev, "sock %p", sk);
7071
7072 if (!lmp_le_capable(hdev))
7073 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7074 MGMT_STATUS_NOT_SUPPORTED);
7075
7076 key_count = __le16_to_cpu(cp->key_count);
7077 if (key_count > max_key_count) {
7078 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7079 key_count);
7080 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7081 MGMT_STATUS_INVALID_PARAMS);
7082 }
7083
7084 expected_len = struct_size(cp, keys, key_count);
7085 if (expected_len != len) {
7086 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7087 expected_len, len);
7088 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7089 MGMT_STATUS_INVALID_PARAMS);
7090 }
7091
7092 bt_dev_dbg(hdev, "key_count %u", key_count);
7093
7094 hci_dev_lock(hdev);
7095
7096 hci_smp_ltks_clear(hdev);
7097
7098 for (i = 0; i < key_count; i++) {
7099 struct mgmt_ltk_info *key = &cp->keys[i];
7100 u8 type, authenticated;
7101
7102 if (hci_is_blocked_key(hdev,
7103 HCI_BLOCKED_KEY_TYPE_LTK,
7104 key->val)) {
7105 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7106 &key->addr.bdaddr);
7107 continue;
7108 }
7109
7110 if (!ltk_is_valid(key)) {
7111 bt_dev_warn(hdev, "Invalid LTK for %pMR",
7112 &key->addr.bdaddr);
7113 continue;
7114 }
7115
7116 switch (key->type) {
7117 case MGMT_LTK_UNAUTHENTICATED:
7118 authenticated = 0x00;
7119 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7120 break;
7121 case MGMT_LTK_AUTHENTICATED:
7122 authenticated = 0x01;
7123 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7124 break;
7125 case MGMT_LTK_P256_UNAUTH:
7126 authenticated = 0x00;
7127 type = SMP_LTK_P256;
7128 break;
7129 case MGMT_LTK_P256_AUTH:
7130 authenticated = 0x01;
7131 type = SMP_LTK_P256;
7132 break;
7133 case MGMT_LTK_P256_DEBUG:
7134 authenticated = 0x00;
7135 type = SMP_LTK_P256_DEBUG;
7136 fallthrough;
7137 default:
7138 continue;
7139 }
7140
7141 hci_add_ltk(hdev, &key->addr.bdaddr,
7142 le_addr_type(key->addr.type), type, authenticated,
7143 key->val, key->enc_size, key->ediv, key->rand);
7144 }
7145
7146 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7147 NULL, 0);
7148
7149 hci_dev_unlock(hdev);
7150
7151 return err;
7152 }
7153
7154 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7155 {
7156 struct mgmt_pending_cmd *cmd = data;
7157 struct hci_conn *conn = cmd->user_data;
7158 struct mgmt_cp_get_conn_info *cp = cmd->param;
7159 struct mgmt_rp_get_conn_info rp;
7160 u8 status;
7161
7162 bt_dev_dbg(hdev, "err %d", err);
7163
7164 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7165
7166 status = mgmt_status(err);
7167 if (status == MGMT_STATUS_SUCCESS) {
7168 rp.rssi = conn->rssi;
7169 rp.tx_power = conn->tx_power;
7170 rp.max_tx_power = conn->max_tx_power;
7171 } else {
7172 rp.rssi = HCI_RSSI_INVALID;
7173 rp.tx_power = HCI_TX_POWER_INVALID;
7174 rp.max_tx_power = HCI_TX_POWER_INVALID;
7175 }
7176
7177 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7178 &rp, sizeof(rp));
7179
7180 mgmt_pending_free(cmd);
7181 }
7182
7183 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7184 {
7185 struct mgmt_pending_cmd *cmd = data;
7186 struct mgmt_cp_get_conn_info *cp = cmd->param;
7187 struct hci_conn *conn;
7188 int err;
7189 __le16 handle;
7190
7191 /* Make sure we are still connected */
7192 if (cp->addr.type == BDADDR_BREDR)
7193 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7194 &cp->addr.bdaddr);
7195 else
7196 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7197
7198 if (!conn || conn->state != BT_CONNECTED)
7199 return MGMT_STATUS_NOT_CONNECTED;
7200
7201 cmd->user_data = conn;
7202 handle = cpu_to_le16(conn->handle);
7203
7204 /* Refresh RSSI each time */
7205 err = hci_read_rssi_sync(hdev, handle);
7206
7207 /* For LE links the TX power does not change, so there is no need
7208 * to query it again once the value is known.
7209 */
7210 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7211 conn->tx_power == HCI_TX_POWER_INVALID))
7212 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7213
7214 /* Max TX power needs to be read only once per connection */
7215 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7216 err = hci_read_tx_power_sync(hdev, handle, 0x01);
7217
7218 return err;
7219 }
7220
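/* Get Connection Info serves cached RSSI/TX-power values when they are
 * fresh and only queries the controller when the cache has expired. The
 * expiry time is randomized between conn_info_min_age and
 * conn_info_max_age so user space cannot tell exactly when a poll will
 * trigger new HCI traffic.
 */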
7221 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7222 u16 len)
7223 {
7224 struct mgmt_cp_get_conn_info *cp = data;
7225 struct mgmt_rp_get_conn_info rp;
7226 struct hci_conn *conn;
7227 unsigned long conn_info_age;
7228 int err = 0;
7229
7230 bt_dev_dbg(hdev, "sock %p", sk);
7231
7232 memset(&rp, 0, sizeof(rp));
7233 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7234 rp.addr.type = cp->addr.type;
7235
7236 if (!bdaddr_type_is_valid(cp->addr.type))
7237 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7238 MGMT_STATUS_INVALID_PARAMS,
7239 &rp, sizeof(rp));
7240
7241 hci_dev_lock(hdev);
7242
7243 if (!hdev_is_powered(hdev)) {
7244 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7245 MGMT_STATUS_NOT_POWERED, &rp,
7246 sizeof(rp));
7247 goto unlock;
7248 }
7249
7250 if (cp->addr.type == BDADDR_BREDR)
7251 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7252 &cp->addr.bdaddr);
7253 else
7254 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7255
7256 if (!conn || conn->state != BT_CONNECTED) {
7257 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7258 MGMT_STATUS_NOT_CONNECTED, &rp,
7259 sizeof(rp));
7260 goto unlock;
7261 }
7262
7263 /* To keep the client from guessing when to poll again, calculate the
7264 * conn info age as a random value between the min/max set in hdev.
7265 */
7266 conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
7267 hdev->conn_info_max_age - 1);
7268
7269 /* Query controller to refresh cached values if they are too old or were
7270 * never read.
7271 */
7272 if (time_after(jiffies, conn->conn_info_timestamp +
7273 msecs_to_jiffies(conn_info_age)) ||
7274 !conn->conn_info_timestamp) {
7275 struct mgmt_pending_cmd *cmd;
7276
7277 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7278 len);
7279 if (!cmd) {
7280 err = -ENOMEM;
7281 } else {
7282 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7283 cmd, get_conn_info_complete);
7284 }
7285
7286 if (err < 0) {
7287 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7288 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7289
7290 if (cmd)
7291 mgmt_pending_free(cmd);
7292
7293 goto unlock;
7294 }
7295
7296 conn->conn_info_timestamp = jiffies;
7297 } else {
7298 /* Cache is valid, just reply with values cached in hci_conn */
7299 rp.rssi = conn->rssi;
7300 rp.tx_power = conn->tx_power;
7301 rp.max_tx_power = conn->max_tx_power;
7302
7303 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7304 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7305 }
7306
7307 unlock:
7308 hci_dev_unlock(hdev);
7309 return err;
7310 }
7311
7312 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7313 {
7314 struct mgmt_pending_cmd *cmd = data;
7315 struct mgmt_cp_get_clock_info *cp = cmd->param;
7316 struct mgmt_rp_get_clock_info rp;
7317 struct hci_conn *conn = cmd->user_data;
7318 u8 status = mgmt_status(err);
7319
7320 bt_dev_dbg(hdev, "err %d", err);
7321
7322 memset(&rp, 0, sizeof(rp));
7323 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7324 rp.addr.type = cp->addr.type;
7325
7326 if (err)
7327 goto complete;
7328
7329 rp.local_clock = cpu_to_le32(hdev->clock);
7330
7331 if (conn) {
7332 rp.piconet_clock = cpu_to_le32(conn->clock);
7333 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7334 }
7335
7336 complete:
7337 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7338 sizeof(rp));
7339
7340 mgmt_pending_free(cmd);
7341 }
7342
7343 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7344 {
7345 struct mgmt_pending_cmd *cmd = data;
7346 struct mgmt_cp_get_clock_info *cp = cmd->param;
7347 struct hci_cp_read_clock hci_cp;
7348 struct hci_conn *conn;
7349
7350 memset(&hci_cp, 0, sizeof(hci_cp));
7351 hci_read_clock_sync(hdev, &hci_cp);
7352
7353 /* Make sure connection still exists */
7354 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7355 if (!conn || conn->state != BT_CONNECTED)
7356 return MGMT_STATUS_NOT_CONNECTED;
7357
7358 cmd->user_data = conn;
7359 hci_cp.handle = cpu_to_le16(conn->handle);
7360 hci_cp.which = 0x01; /* Piconet clock */
7361
7362 return hci_read_clock_sync(hdev, &hci_cp);
7363 }
7364
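/* Get Clock Info always reports the local clock; if a peer address is
 * given, the piconet clock and its accuracy are read from the matching
 * BR/EDR connection as well (conn stays NULL for BDADDR_ANY, in which
 * case only the local clock is returned).
 */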
7365 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7366 u16 len)
7367 {
7368 struct mgmt_cp_get_clock_info *cp = data;
7369 struct mgmt_rp_get_clock_info rp;
7370 struct mgmt_pending_cmd *cmd;
7371 struct hci_conn *conn;
7372 int err;
7373
7374 bt_dev_dbg(hdev, "sock %p", sk);
7375
7376 memset(&rp, 0, sizeof(rp));
7377 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7378 rp.addr.type = cp->addr.type;
7379
7380 if (cp->addr.type != BDADDR_BREDR)
7381 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7382 MGMT_STATUS_INVALID_PARAMS,
7383 &rp, sizeof(rp));
7384
7385 hci_dev_lock(hdev);
7386
7387 if (!hdev_is_powered(hdev)) {
7388 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7389 MGMT_STATUS_NOT_POWERED, &rp,
7390 sizeof(rp));
7391 goto unlock;
7392 }
7393
7394 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7395 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7396 &cp->addr.bdaddr);
7397 if (!conn || conn->state != BT_CONNECTED) {
7398 err = mgmt_cmd_complete(sk, hdev->id,
7399 MGMT_OP_GET_CLOCK_INFO,
7400 MGMT_STATUS_NOT_CONNECTED,
7401 &rp, sizeof(rp));
7402 goto unlock;
7403 }
7404 } else {
7405 conn = NULL;
7406 }
7407
7408 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7409 if (!cmd)
7410 err = -ENOMEM;
7411 else
7412 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7413 get_clock_info_complete);
7414
7415 if (err < 0) {
7416 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7417 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7418
7419 if (cmd)
7420 mgmt_pending_free(cmd);
7421 }
7422
7424 unlock:
7425 hci_dev_unlock(hdev);
7426 return err;
7427 }
7428
7429 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7430 {
7431 struct hci_conn *conn;
7432
7433 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7434 if (!conn)
7435 return false;
7436
7437 if (conn->dst_type != type)
7438 return false;
7439
7440 if (conn->state != BT_CONNECTED)
7441 return false;
7442
7443 return true;
7444 }
7445
7446 /* This function requires the caller to hold hdev->lock */
7447 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7448 u8 addr_type, u8 auto_connect)
7449 {
7450 struct hci_conn_params *params;
7451
7452 params = hci_conn_params_add(hdev, addr, addr_type);
7453 if (!params)
7454 return -EIO;
7455
7456 if (params->auto_connect == auto_connect)
7457 return 0;
7458
7459 hci_pend_le_list_del_init(params);
7460
7461 switch (auto_connect) {
7462 case HCI_AUTO_CONN_DISABLED:
7463 case HCI_AUTO_CONN_LINK_LOSS:
7464 /* If auto connect is being disabled when we're trying to
7465 * connect to the device, keep connecting.
7466 */
7467 if (params->explicit_connect)
7468 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7469 break;
7470 case HCI_AUTO_CONN_REPORT:
7471 if (params->explicit_connect)
7472 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7473 else
7474 hci_pend_le_list_add(params, &hdev->pend_le_reports);
7475 break;
7476 case HCI_AUTO_CONN_DIRECT:
7477 case HCI_AUTO_CONN_ALWAYS:
7478 if (!is_connected(hdev, addr, addr_type))
7479 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7480 break;
7481 }
7482
7483 params->auto_connect = auto_connect;
7484
7485 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7486 addr, addr_type, auto_connect);
7487
7488 return 0;
7489 }
7490
7491 static void device_added(struct sock *sk, struct hci_dev *hdev,
7492 bdaddr_t *bdaddr, u8 type, u8 action)
7493 {
7494 struct mgmt_ev_device_added ev;
7495
7496 bacpy(&ev.addr.bdaddr, bdaddr);
7497 ev.addr.type = type;
7498 ev.action = action;
7499
7500 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7501 }
7502
7503 static void add_device_complete(struct hci_dev *hdev, void *data, int err)
7504 {
7505 struct mgmt_pending_cmd *cmd = data;
7506 struct mgmt_cp_add_device *cp = cmd->param;
7507
7508 if (!err) {
7509 device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
7510 cp->action);
7511 device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
7512 cp->addr.type, hdev->conn_flags,
7513 PTR_UINT(cmd->user_data));
7514 }
7515
7516 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
7517 mgmt_status(err), &cp->addr, sizeof(cp->addr));
7518 mgmt_pending_free(cmd);
7519 }
7520
7521 static int add_device_sync(struct hci_dev *hdev, void *data)
7522 {
7523 return hci_update_passive_scan_sync(hdev);
7524 }
7525
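/* Add Device action values (per BlueZ's mgmt API): 0x00 reports the
 * device when seen during background scanning, 0x01 allows incoming
 * (direct) connections, and 0x02 auto-connects whenever the device is
 * found. For BR/EDR addresses only 0x01 is supported here; LE addresses
 * map onto the HCI_AUTO_CONN_* policies below.
 */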
7526 static int add_device(struct sock *sk, struct hci_dev *hdev,
7527 void *data, u16 len)
7528 {
7529 struct mgmt_pending_cmd *cmd;
7530 struct mgmt_cp_add_device *cp = data;
7531 u8 auto_conn, addr_type;
7532 struct hci_conn_params *params;
7533 int err;
7534 u32 current_flags = 0;
7535 u32 supported_flags;
7536
7537 bt_dev_dbg(hdev, "sock %p", sk);
7538
7539 if (!bdaddr_type_is_valid(cp->addr.type) ||
7540 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7541 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7542 MGMT_STATUS_INVALID_PARAMS,
7543 &cp->addr, sizeof(cp->addr));
7544
7545 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7546 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7547 MGMT_STATUS_INVALID_PARAMS,
7548 &cp->addr, sizeof(cp->addr));
7549
7550 hci_dev_lock(hdev);
7551
7552 if (cp->addr.type == BDADDR_BREDR) {
7553 /* Only incoming connections action is supported for now */
7554 if (cp->action != 0x01) {
7555 err = mgmt_cmd_complete(sk, hdev->id,
7556 MGMT_OP_ADD_DEVICE,
7557 MGMT_STATUS_INVALID_PARAMS,
7558 &cp->addr, sizeof(cp->addr));
7559 goto unlock;
7560 }
7561
7562 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7563 &cp->addr.bdaddr,
7564 cp->addr.type, 0);
7565 if (err)
7566 goto unlock;
7567
7568 hci_update_scan(hdev);
7569
7570 goto added;
7571 }
7572
7573 addr_type = le_addr_type(cp->addr.type);
7574
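	/* Map the mgmt Add Device action to the LE auto-connect policy:
	 * 0x00 scan and report only, 0x01 connect only to directed
	 * advertising, 0x02 always auto-connect.
	 */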
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->user_data = UINT_PTR(current_flags);

	err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
				 add_device_complete);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		mgmt_pending_free(cmd);
	}

	goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}

static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_free(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
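			/* Entries with an explicit connect attempt in
			 * flight are kept; they are only downgraded to
			 * HCI_AUTO_CONN_EXPLICIT so they get cleaned up
			 * once that attempt finishes.
			 */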
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			hci_conn_params_free(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int conn_update_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn_params *params = data;
	struct hci_conn *conn;

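	/* The connection may have dropped by the time this sync work
	 * runs; in that case there is nothing to update and the queued
	 * request is simply abandoned.
	 */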
	conn = hci_conn_hash_lookup_le(hdev, &params->addr, params->addr_type);
	if (!conn)
		return -ECANCELED;

	return hci_le_conn_update_sync(hdev, conn, params);
}

static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

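	/* Cap param_count so that the struct_size() calculation below
	 * cannot wrap around the u16 message length.
	 */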
	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	if (param_count > 1)
		hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		bool update = false;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		/* Detect when the loading is for an existing parameter then
		 * attempt to trigger the connection update procedure.
		 */
		if (!i && param_count == 1) {
			hci_param = hci_conn_params_lookup(hdev,
							   &param->addr.bdaddr,
							   addr_type);
			if (hci_param)
				update = true;
			else
				hci_conn_params_clear_disabled(hdev);
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;

		/* Check if we need to trigger a connection update */
		if (update) {
			struct hci_conn *conn;

			/* Lookup for existing connection as central and check
			 * if parameters match and if they don't then trigger
			 * a connection update.
			 */
			conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
						       addr_type);
			if (conn && conn->role == HCI_ROLE_MASTER &&
			    (conn->le_conn_min_interval != min ||
			     conn->le_conn_max_interval != max ||
			     conn->le_conn_latency != latency ||
			     conn->le_supv_timeout != timeout))
				hci_cmd_sync_queue(hdev, conn_update_sync,
						   hci_param, NULL);
		}
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}

static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

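	/* If the HCI_UNCONFIGURED flag no longer matches the actual
	 * configuration state, the controller has to move between the
	 * configured and unconfigured index lists, so the index is
	 * removed and re-added with the flag toggled.
	 */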
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

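			/* EIR budget: Class of Device is 5 bytes
			 * (len + type + 3 bytes data) and each SSP
			 * hash/randomizer field is 18 bytes
			 * (len + type + 16 bytes data).
			 */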
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}

static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
				  struct mgmt_cp_read_local_oob_ext_data *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
			       cp, sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
				 read_local_oob_ext_data_complete);

	if (err < 0) {
		mgmt_pending_remove(cmd);
		return err;
	}

	return 0;
}

static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
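		/* For the LE case the reply budget is: LE Bluetooth
		 * Device Address (9 bytes), LE Role (3 bytes), SC
		 * Confirmation and Random values (18 bytes each) and
		 * Flags (3 bytes).
		 */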
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

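		/* The LE Bluetooth Device Address field carries the
		 * 6-byte address plus one type byte: 0x01 for a
		 * random/static address, 0x00 for a public one.
		 */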
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}

static u32 get_supported_adv_flags(struct hci_dev *hdev)
{
	u32 flags = 0;

	flags |= MGMT_ADV_FLAG_CONNECTABLE;
	flags |= MGMT_ADV_FLAG_DISCOV;
	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
	flags |= MGMT_ADV_FLAG_APPEARANCE;
	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
	flags |= MGMT_ADV_PARAM_DURATION;
	flags |= MGMT_ADV_PARAM_TIMEOUT;
	flags |= MGMT_ADV_PARAM_INTERVALS;
	flags |= MGMT_ADV_PARAM_TX_POWER;
	flags |= MGMT_ADV_PARAM_SCAN_RSP;

	/* With extended advertising the TX_POWER returned from Set Adv
	 * Param will always be valid.
	 */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
		flags |= MGMT_ADV_FLAG_TX_POWER;

	if (ext_adv_capable(hdev)) {
		flags |= MGMT_ADV_FLAG_SEC_1M;
		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;

		if (le_2m_capable(hdev))
			flags |= MGMT_ADV_FLAG_SEC_2M;

		if (le_coded_capable(hdev))
			flags |= MGMT_ADV_FLAG_SEC_CODED;
	}

	return flags;
}

static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = max_adv_len(hdev);
	rp->max_scan_rsp_len = max_adv_len(hdev);
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}

static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */

	return eir_append_local_name(hdev, buf, 0);
}

static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
			   bool is_adv_data)
{
	u8 max_len = max_adv_len(hdev);

	if (is_adv_data) {
		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
				 MGMT_ADV_FLAG_LIMITED_DISCOV |
				 MGMT_ADV_FLAG_MANAGED_FLAGS))
			max_len -= 3;

		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
			max_len -= 3;
	} else {
		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
			max_len -= calculate_name_len(hdev);

		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
			max_len -= 4;
	}

	return max_len;
}

static bool flags_managed(u32 adv_flags)
{
	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
			    MGMT_ADV_FLAG_LIMITED_DISCOV |
			    MGMT_ADV_FLAG_MANAGED_FLAGS);
}

static bool tx_power_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
}

static bool name_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
}

static bool appearance_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
}

static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
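	/* Each advertising data structure is length-prefixed: the first
	 * byte covers the type byte plus payload, the second byte is the
	 * AD type (e.g. 0x02 0x01 0x06 is a 2-byte Flags field).
	 */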
	for (i = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		if (!cur_len)
			continue;

		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;

		/* If the current field length would exceed the total data
		 * length, then it's invalid.
		 */
		if (i + cur_len >= len)
			return false;
	}

	return true;
}

static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
{
	u32 supported_flags, phy_flags;

	/* The current implementation only supports a subset of the specified
	 * flags. Also need to check mutual exclusiveness of sec flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
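	/* "phy_flags & -phy_flags" isolates the lowest set bit, so the
	 * XOR below is non-zero exactly when more than one secondary
	 * PHY flag is set; such requests are rejected.
	 */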
	if (adv_flags & ~supported_flags ||
	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
		return false;

	return true;
}

static bool adv_busy(struct hci_dev *hdev)
{
	return pending_find(MGMT_OP_SET_LE, hdev);
}

static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
			     int err)
{
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance;

		if (!adv->pending)
			continue;

		if (!err) {
			adv->pending = false;
			continue;
		}

		instance = adv->instance;

		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(sk, hdev, instance);
	}

	hci_dev_unlock(hdev);
}

static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_advertising *cp = cmd->param;
	struct mgmt_rp_add_advertising rp;

	memset(&rp, 0, sizeof(rp));

	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));

	add_adv_complete(hdev, cmd->sk, cp->instance, err);

	mgmt_pending_free(cmd);
}

static int add_advertising_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_advertising *cp = cmd->param;

	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}

static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}

static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;

	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
}

static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
	struct mgmt_rp_add_advertising rp;

	add_adv_complete(hdev, cmd->sk, cp->instance, err);

	memset(&rp, 0, sizeof(rp));

	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}

static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
	int err;

	if (ext_adv_capable(hdev)) {
		err = hci_update_adv_data_sync(hdev, cp->instance);
		if (err)
			return err;

		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
		if (err)
			return err;

		return hci_enable_ext_advertising_sync(hdev, cp->instance);
	}

	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}

static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void remove_advertising_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_advertising *cp = cmd->param;
	struct mgmt_rp_remove_advertising rp;

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}

static int remove_advertising_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_advertising *cp = cmd->param;
	int err;

	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
	if (err)
		return err;

	if (list_empty(&hdev->adv_instances))
		err = hci_disable_advertising_sync(hdev);

	return err;
}

static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 flags, supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	/* The current implementation only supports a subset of the specified
	 * flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	if (flags & ~supported_flags)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}

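/* The handler table is indexed by mgmt opcode: the entry at index N
 * serves opcode N, with index 0 left as a placeholder since opcodes
 * start at 0x0001. New entries must therefore be appended in opcode
 * order.
 */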
9196 static const struct hci_mgmt_handler mgmt_handlers[] = {
9197 { NULL }, /* 0x0000 (no command) */
9198 { read_version, MGMT_READ_VERSION_SIZE,
9199 HCI_MGMT_NO_HDEV |
9200 HCI_MGMT_UNTRUSTED },
9201 { read_commands, MGMT_READ_COMMANDS_SIZE,
9202 HCI_MGMT_NO_HDEV |
9203 HCI_MGMT_UNTRUSTED },
9204 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
9205 HCI_MGMT_NO_HDEV |
9206 HCI_MGMT_UNTRUSTED },
9207 { read_controller_info, MGMT_READ_INFO_SIZE,
9208 HCI_MGMT_UNTRUSTED },
9209 { set_powered, MGMT_SETTING_SIZE },
9210 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
9211 { set_connectable, MGMT_SETTING_SIZE },
9212 { set_fast_connectable, MGMT_SETTING_SIZE },
9213 { set_bondable, MGMT_SETTING_SIZE },
9214 { set_link_security, MGMT_SETTING_SIZE },
9215 { set_ssp, MGMT_SETTING_SIZE },
9216 { set_hs, MGMT_SETTING_SIZE },
9217 { set_le, MGMT_SETTING_SIZE },
9218 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
9219 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
9220 { add_uuid, MGMT_ADD_UUID_SIZE },
9221 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
9222 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
9223 HCI_MGMT_VAR_LEN },
9224 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
9225 HCI_MGMT_VAR_LEN },
9226 { disconnect, MGMT_DISCONNECT_SIZE },
9227 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
9228 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
9229 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
9230 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
9231 { pair_device, MGMT_PAIR_DEVICE_SIZE },
9232 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
9233 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
9234 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
9235 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
9236 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
9237 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
9238 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
9239 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
9240 HCI_MGMT_VAR_LEN },
9241 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
9242 { start_discovery, MGMT_START_DISCOVERY_SIZE },
9243 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
9244 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
9245 { block_device, MGMT_BLOCK_DEVICE_SIZE },
9246 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
9247 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
9248 { set_advertising, MGMT_SETTING_SIZE },
9249 { set_bredr, MGMT_SETTING_SIZE },
9250 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
9251 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
9252 { set_secure_conn, MGMT_SETTING_SIZE },
9253 { set_debug_keys, MGMT_SETTING_SIZE },
9254 { set_privacy, MGMT_SET_PRIVACY_SIZE },
9255 { load_irks, MGMT_LOAD_IRKS_SIZE,
9256 HCI_MGMT_VAR_LEN },
9257 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
9258 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
9259 { add_device, MGMT_ADD_DEVICE_SIZE },
9260 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
9261 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
9262 HCI_MGMT_VAR_LEN },
9263 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
9264 HCI_MGMT_NO_HDEV |
9265 HCI_MGMT_UNTRUSTED },
9266 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
9267 HCI_MGMT_UNCONFIGURED |
9268 HCI_MGMT_UNTRUSTED },
9269 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
9270 HCI_MGMT_UNCONFIGURED },
9271 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
9272 HCI_MGMT_UNCONFIGURED },
9273 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
9274 HCI_MGMT_VAR_LEN },
9275 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
9276 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
9277 HCI_MGMT_NO_HDEV |
9278 HCI_MGMT_UNTRUSTED },
9279 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
9280 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
9281 HCI_MGMT_VAR_LEN },
9282 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
9283 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
9284 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
9285 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
9286 HCI_MGMT_UNTRUSTED },
9287 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
9288 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
9289 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
9290 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
9291 HCI_MGMT_VAR_LEN },
9292 { set_wideband_speech, MGMT_SETTING_SIZE },
9293 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
9294 HCI_MGMT_UNTRUSTED },
9295 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
9296 HCI_MGMT_UNTRUSTED |
9297 HCI_MGMT_HDEV_OPTIONAL },
9298 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
9299 HCI_MGMT_VAR_LEN |
9300 HCI_MGMT_HDEV_OPTIONAL },
9301 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
9302 HCI_MGMT_UNTRUSTED },
9303 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
9304 HCI_MGMT_VAR_LEN },
9305 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
9306 HCI_MGMT_UNTRUSTED },
9307 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
9308 HCI_MGMT_VAR_LEN },
9309 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
9310 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
9311 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
9312 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
9313 HCI_MGMT_VAR_LEN },
9314 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
9315 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
9316 HCI_MGMT_VAR_LEN },
9317 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
9318 HCI_MGMT_VAR_LEN },
9319 { add_adv_patterns_monitor_rssi,
9320 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
9321 HCI_MGMT_VAR_LEN },
9322 { set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
9323 HCI_MGMT_VAR_LEN },
9324 { mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
9325 { mesh_send, MGMT_MESH_SEND_SIZE,
9326 HCI_MGMT_VAR_LEN },
9327 { mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
9328 { mgmt_hci_cmd_sync, MGMT_HCI_CMD_SYNC_SIZE, HCI_MGMT_VAR_LEN },
9329 };
9330
mgmt_index_added(struct hci_dev * hdev)9331 void mgmt_index_added(struct hci_dev *hdev)
9332 {
9333 struct mgmt_ev_ext_index ev;
9334
9335 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9336 return;
9337
9338 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9339 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
9340 HCI_MGMT_UNCONF_INDEX_EVENTS);
9341 ev.type = 0x01;
9342 } else {
9343 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9344 HCI_MGMT_INDEX_EVENTS);
9345 ev.type = 0x00;
9346 }
9347
9348 ev.bus = hdev->bus;
9349
9350 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9351 HCI_MGMT_EXT_INDEX_EVENTS);
9352 }
9353
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
}

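/* Called when the power-on sequence finishes. On success the stored LE
 * auto-connect actions are restored and passive scanning is refreshed
 * before pending MGMT_OP_SET_POWERED commands are answered and the new
 * settings are broadcast.
 */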
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}

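/* Power-off counterpart of mgmt_power_on(). Fails all pending commands
 * and, if the class of device was non-zero, announces that it has
 * reverted to zero while powered off.
 */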
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let us use
	 * the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
	else
		match.mgmt_status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct mgmt_pending_cmd *cmd;
	u8 status;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

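/* Forward a newly created BR/EDR link key to userspace for storage.
 * store_hint tells userspace whether to persist the key (0x01) or to
 * keep it only for the duration of the connection (0x00).
 */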
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

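/* Map an SMP long term key type to the mgmt key type, distinguishing
 * legacy vs P-256 keys and authenticated vs unauthenticated pairing.
 * P-256 debug keys are reported with their own dedicated type.
 */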
static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_RESPONDER:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}

	return MGMT_LTK_UNAUTHENTICATED;
}

void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't need their
	 * long term keys stored. Their addresses will change the next
	 * time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
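	/* A static random address has its two most significant bits set
	 * to 11; any other value in the random address space is an RPA
	 * or NRPA and thus not an identity address.
	 */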
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't need their
	 * signature resolving keys stored. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}

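/* Report negotiated LE connection parameters to userspace so they can
 * be stored and later reloaded via MGMT_OP_LOAD_CONN_PARAM. Only
 * identity addresses are reported; parameters tied to an unresolved
 * RPA would be useless once the address changes.
 */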
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

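/* Emit MGMT_EV_DEVICE_CONNECTED exactly once per connection, guarded
 * by the HCI_CONN_MGMT_CONNECTED flag. For LE the event carries the
 * advertising data; for BR/EDR the remote name and class of device are
 * packed as EIR fields instead.
 */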
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* Allocate buffer for LE or BR/EDR adv */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) +
				     (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE,
						    name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class,
						    sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}

static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}

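/* Return true if the controller is in the middle of powering down,
 * either because HCI_POWERING_DOWN is set or because a pending
 * MGMT_OP_SET_POWERED command is switching the controller off.
 */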
bool mgmt_powering_down(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
		return true;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

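/* Emit MGMT_EV_DEVICE_DISCONNECTED for connections that were previously
 * reported to userspace. While suspended the reason is overridden so
 * that userspace can tell a host-suspend disconnect from a remote one.
 */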
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

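/* If the connection had already been reported as connected, turn the
 * failure into a disconnected event rather than MGMT_EV_CONNECT_FAILED,
 * otherwise userspace would never see the connection go away.
 */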
void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
					 conn->dst_type, status, true);
		return;
	}

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

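/* Common completion handler for the four user confirm/passkey reply
 * commands below: look up the pending command by opcode and finish it
 * with the translated HCI status.
 */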
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS,
				   NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is an HCI command related to powering the
		 * HCI dev on or off, don't send any mgmt signals.
		 */
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
			return;

		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}

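/* Helpers for the service discovery UUID filter. 16-bit and 32-bit
 * UUIDs found in EIR data are expanded against the Bluetooth Base UUID
 * (00000000-0000-1000-8000-00805F9B34FB) before comparison; e.g. the
 * 16-bit Heart Rate service UUID 0x180D expands to
 * 0000180D-0000-1000-8000-00805F9B34FB.
 */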
static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}

static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}

static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If an RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * an RSSI smaller than the RSSI threshold will be dropped. If the
	 * quirk is set, let it through for further processing, as we might
	 * need to restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}

void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.monitor_handle = cpu_to_le16(handle);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}

static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}

static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}

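/* Mesh variant of device found reporting. If a list of interesting AD
 * types has been configured via MGMT_OP_SET_MESH_RECEIVER, only
 * advertisements containing at least one of those AD types (in the
 * advertising data or the scan response) are forwarded to userspace.
 */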
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}

void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel-initiated discovery. With
	 * LE, one exception is when pend_le_reports > 0, in which case
	 * we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);

			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}

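/* Report the result of a remote name request as a device found event.
 * On success the resolved name is packed as an EIR_NAME_COMPLETE field;
 * on failure the MGMT_DEV_FOUND_NAME_REQUEST_FAILED flag is set instead.
 */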
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) +
			     (name ? eir_precalc_len(name_len) : 0));
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name,
					    name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}

void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	} else {
		memset(&ev.addr, 0, sizeof(ev.addr));
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}

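/* The management interface is exposed as the control channel of the
 * HCI socket; registering this struct wires the handler table above
 * into the HCI socket layer.
 */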
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};

int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}

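/* Called when a management socket is closed: abort any mesh
 * transmissions that the closing socket still has queued on any
 * controller.
 */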
void mgmt_cleanup(struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, true);
		} while (mesh_tx);
	}

	read_unlock(&hci_dev_list_lock);
}