/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010 Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <linux/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
#include "aosp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	23

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
	MGMT_OP_HCI_CMD_SYNC,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};

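/* Subset of commands and events available to untrusted (non-privileged)
 * sockets. Only read-only information is exposed here.
 */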
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};

#define CACHE_TIMEOUT	secs_to_jiffies(2)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

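/* Map Unix errno values (as returned by the hci_sync machinery) onto
 * MGMT status codes. Anything not listed falls back to
 * MGMT_STATUS_FAILED.
 */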
static u8 mgmt_errno_status(int err)
{
	switch (err) {
	case 0:
		return MGMT_STATUS_SUCCESS;
	case -EPERM:
		return MGMT_STATUS_REJECTED;
	case -EINVAL:
		return MGMT_STATUS_INVALID_PARAMS;
	case -EOPNOTSUPP:
		return MGMT_STATUS_NOT_SUPPORTED;
	case -EBUSY:
		return MGMT_STATUS_BUSY;
	case -ETIMEDOUT:
		return MGMT_STATUS_AUTH_FAILED;
	case -ENOMEM:
		return MGMT_STATUS_NO_RESOURCES;
	case -EISCONN:
		return MGMT_STATUS_ALREADY_CONNECTED;
	case -ENOTCONN:
		return MGMT_STATUS_DISCONNECTED;
	}

	return MGMT_STATUS_FAILED;
}

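/* Convert an error into a MGMT status: negative values are treated as
 * Unix errnos, non-negative values as HCI status codes looked up in
 * mgmt_status_table above.
 */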
static u8 mgmt_status(int err)
{
	if (err < 0)
		return mgmt_errno_status(err);

	if (err < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[err];

	return MGMT_STATUS_FAILED;
}

static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}

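/* Translate the BDADDR_LE_* address types used in the MGMT wire format
 * into the ADDR_LE_DEV_* constants used internally by the HCI core.
 */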
static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;
	else
		return ADDR_LE_DEV_RANDOM;
}

void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}

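/* Return the supported commands and events. Trusted sockets get the
 * full tables, untrusted sockets only the read-only subsets defined
 * above.
 */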
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}

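/* Build the list of configured controller indexes. The reply is sized
 * in a first pass and filled in a second one; controllers in a
 * transient state (SETUP, CONFIG or USER_CHANNEL) and raw-only devices
 * are skipped, so the final count may be smaller than the allocation.
 */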
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}

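/* A controller counts as configured once any required external
 * configuration has completed and, for hardware shipping without a
 * valid address, a public address has been set.
 */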
static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}

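/* Build the bitmask of PHYs the controller can support, derived from
 * the BR/EDR LMP feature bits and the LE feature bits for the 2M and
 * Coded PHYs.
 */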
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}

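/* Note that for the EDR packet types the bits in hdev->pkt_type are
 * exclusion bits: a set HCI_2DH1/HCI_3DH5/... bit means that packet
 * type shall not be used, hence the inverted tests below.
 */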
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}

static u32 get_configurable_phys(struct hci_dev *hdev)
{
	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}

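/* Settings the controller could support given its capabilities, as
 * opposed to get_current_settings() below which reports what is
 * currently enabled.
 */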
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev))
			settings |= MGMT_SETTING_SSP;

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (ll_privacy_capable(hdev))
		settings |= MGMT_SETTING_LL_PRIVACY;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then whether
	 * the address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	if (ll_privacy_capable(hdev))
		settings |= MGMT_SETTING_LL_PRIVACY;

	return settings;
}

static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}

bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}

static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}

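/* Delayed work: when the service cache timeout expires, push the
 * updated EIR data and class of device to the controller via the
 * cmd_sync queue.
 */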
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}

static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable advertising path below,
	 * i.e. hci_start_ext_adv_sync() or hci_enable_advertising_sync().
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}

static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data);

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);

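/* Finish a mesh transmit attempt: unless silent, notify user space
 * with a Mesh Packet Complete event for the handle, then free the
 * pending TX entry.
 */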
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}

static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}

static int mesh_send_sync(struct hci_dev *hdev, void *data);
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);

static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}

static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}

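/* Performed once per controller when it first comes under mgmt
 * control: set up the mgmt-specific delayed work and mark the device
 * as mgmt-managed.
 */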
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them. For mgmt,
	 * however, we require user space to explicitly enable
	 * it.
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}

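/* Append class of device, appearance and the complete and short local
 * names to an EIR buffer and return the number of bytes written.
 */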
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}

static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}

static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}

void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for the AUTO_OFF case where the device might
		 * not "really" have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}

static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_settings() for power on, as power off is
		 * deferred to hdev->power_off work which does call
		 * hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}

static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return -ECANCELED;

	cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}

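/* MGMT_OP_SET_POWERED handler: validate the request, short-circuit the
 * no-op and busy cases, then queue the actual power change on the
 * cmd_sync workqueue.
 */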
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!cp->val) {
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
					      MGMT_STATUS_BUSY);
			goto failed;
		}
	}

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, *status);
}

static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	/* dequeue cmd_sync entries using cmd as data as that is about to be
	 * removed/freed.
	 */
	hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);

	if (cmd->cmd_complete) {
		cmd->cmd_complete(cmd, match->mgmt_status);
		return;
	}

	cmd_status_rsp(cmd, data);
}

static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}

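/* Helpers that express why a BR/EDR or LE command cannot proceed:
 * NOT_SUPPORTED if the radio lacks the feature, REJECTED if the
 * feature is present but currently disabled.
 */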
static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = secs_to_jiffies(hdev->discov_timeout);

		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}

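/* MGMT_OP_SET_DISCOVERABLE handler. val is 0x00 (off), 0x01 (general)
 * or 0x02 (limited); limited discoverable requires a timeout while
 * disabling requires that none is given.
 */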
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = secs_to_jiffies(hdev->discov_timeout);

			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}

static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}

static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}

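/* MGMT_OP_SET_CONNECTABLE handler: when powered off only the stored
 * setting is updated, otherwise the change is queued on the cmd_sync
 * workqueue.
 */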
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

set_bondable(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)1812 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1813 u16 len)
1814 {
1815 struct mgmt_mode *cp = data;
1816 bool changed;
1817 int err;
1818
1819 bt_dev_dbg(hdev, "sock %p", sk);
1820
1821 if (cp->val != 0x00 && cp->val != 0x01)
1822 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1823 MGMT_STATUS_INVALID_PARAMS);
1824
1825 hci_dev_lock(hdev);
1826
1827 if (cp->val)
1828 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1829 else
1830 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1831
1832 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1833 if (err < 0)
1834 goto unlock;
1835
1836 if (changed) {
1837 /* In limited privacy mode the change of bondable mode
1838 * may affect the local advertising address.
1839 */
1840 hci_update_discoverable(hdev);
1841
1842 err = new_settings(hdev, sk);
1843 }
1844
1845 unlock:
1846 hci_dev_unlock(hdev);
1847 return err;
1848 }
1849
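/* Set Link Security (legacy authentication). This path predates SSP:
 * it maps the mgmt setting onto the HCI_OP_WRITE_AUTH_ENABLE command,
 * sent directly with hci_send_cmd() rather than through the cmd_sync
 * machinery used by most newer handlers in this file.
 */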
1850 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1851 u16 len)
1852 {
1853 struct mgmt_mode *cp = data;
1854 struct mgmt_pending_cmd *cmd;
1855 u8 val, status;
1856 int err;
1857
1858 bt_dev_dbg(hdev, "sock %p", sk);
1859
1860 status = mgmt_bredr_support(hdev);
1861 if (status)
1862 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1863 status);
1864
1865 if (cp->val != 0x00 && cp->val != 0x01)
1866 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1867 MGMT_STATUS_INVALID_PARAMS);
1868
1869 hci_dev_lock(hdev);
1870
1871 if (!hdev_is_powered(hdev)) {
1872 bool changed = false;
1873
1874 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1875 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1876 changed = true;
1877 }
1878
1879 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1880 if (err < 0)
1881 goto failed;
1882
1883 if (changed)
1884 err = new_settings(hdev, sk);
1885
1886 goto failed;
1887 }
1888
1889 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1890 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1891 MGMT_STATUS_BUSY);
1892 goto failed;
1893 }
1894
1895 val = !!cp->val;
1896
1897 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1898 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1899 goto failed;
1900 }
1901
1902 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1903 if (!cmd) {
1904 err = -ENOMEM;
1905 goto failed;
1906 }
1907
1908 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1909 if (err < 0) {
1910 mgmt_pending_remove(cmd);
1911 goto failed;
1912 }
1913
1914 failed:
1915 hci_dev_unlock(hdev);
1916 return err;
1917 }
1918
1919 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1920 {
1921 struct cmd_lookup match = { NULL, hdev };
1922 struct mgmt_pending_cmd *cmd = data;
1923 struct mgmt_mode *cp = cmd->param;
1924 u8 enable = cp->val;
1925 bool changed;
1926
1927 /* Make sure cmd still outstanding. */
1928 if (err == -ECANCELED || cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1929 return;
1930
1931 if (err) {
1932 u8 mgmt_err = mgmt_status(err);
1933
1934 if (enable && hci_dev_test_and_clear_flag(hdev,
1935 HCI_SSP_ENABLED)) {
1936 new_settings(hdev, NULL);
1937 }
1938
1939 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, true,
1940 cmd_status_rsp, &mgmt_err);
1941 return;
1942 }
1943
1944 if (enable) {
1945 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1946 } else {
1947 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1948 }
1949
1950 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, true, settings_rsp, &match);
1951
1952 if (changed)
1953 new_settings(hdev, match.sk);
1954
1955 if (match.sk)
1956 sock_put(match.sk);
1957
1958 hci_update_eir_sync(hdev);
1959 }
1960
1961 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1962 {
1963 struct mgmt_pending_cmd *cmd = data;
1964 struct mgmt_mode *cp = cmd->param;
1965 bool changed = false;
1966 int err;
1967
1968 if (cp->val)
1969 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1970
1971 err = hci_write_ssp_mode_sync(hdev, cp->val);
1972
1973 if (!err && changed)
1974 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1975
1976 return err;
1977 }
1978
1979 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1980 {
1981 struct mgmt_mode *cp = data;
1982 struct mgmt_pending_cmd *cmd;
1983 u8 status;
1984 int err;
1985
1986 bt_dev_dbg(hdev, "sock %p", sk);
1987
1988 status = mgmt_bredr_support(hdev);
1989 if (status)
1990 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1991
1992 if (!lmp_ssp_capable(hdev))
1993 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1994 MGMT_STATUS_NOT_SUPPORTED);
1995
1996 if (cp->val != 0x00 && cp->val != 0x01)
1997 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1998 MGMT_STATUS_INVALID_PARAMS);
1999
2000 hci_dev_lock(hdev);
2001
2002 if (!hdev_is_powered(hdev)) {
2003 bool changed;
2004
2005 if (cp->val) {
2006 changed = !hci_dev_test_and_set_flag(hdev,
2007 HCI_SSP_ENABLED);
2008 } else {
2009 changed = hci_dev_test_and_clear_flag(hdev,
2010 HCI_SSP_ENABLED);
2011 }
2012
2013 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2014 if (err < 0)
2015 goto failed;
2016
2017 if (changed)
2018 err = new_settings(hdev, sk);
2019
2020 goto failed;
2021 }
2022
2023 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2024 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2025 MGMT_STATUS_BUSY);
2026 goto failed;
2027 }
2028
2029 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2030 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2031 goto failed;
2032 }
2033
2034 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2035 if (!cmd)
2036 err = -ENOMEM;
2037 else
2038 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2039 set_ssp_complete);
2040
2041 if (err < 0) {
2042 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2043 MGMT_STATUS_FAILED);
2044
2045 if (cmd)
2046 mgmt_pending_remove(cmd);
2047 }
2048
2049 failed:
2050 hci_dev_unlock(hdev);
2051 return err;
2052 }
2053
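/* Set High Speed is kept only for backwards compatibility; AMP/High
 * Speed support was removed from the kernel, so the command always
 * fails with MGMT_STATUS_NOT_SUPPORTED.
 */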
2054 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2055 {
2056 bt_dev_dbg(hdev, "sock %p", sk);
2057
2058 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2059 MGMT_STATUS_NOT_SUPPORTED);
2060 }
2061
2062 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2063 {
2064 struct cmd_lookup match = { NULL, hdev };
2065 u8 status = mgmt_status(err);
2066
2067 bt_dev_dbg(hdev, "err %d", err);
2068
2069 if (status) {
2070 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, true, cmd_status_rsp,
2071 &status);
2072 return;
2073 }
2074
2075 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, true, settings_rsp, &match);
2076
2077 new_settings(hdev, match.sk);
2078
2079 if (match.sk)
2080 sock_put(match.sk);
2081 }
2082
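/* Worker queued by set_le(). When disabling LE it first tears down all
 * advertising state (instances, legacy advertising, and the extended
 * instance 0) before writing the LE host support bits; when enabling it
 * sets HCI_LE_ENABLED up front so the advertising-data refresh below
 * can run in the same pass.
 */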
2083 static int set_le_sync(struct hci_dev *hdev, void *data)
2084 {
2085 struct mgmt_pending_cmd *cmd = data;
2086 struct mgmt_mode *cp = cmd->param;
2087 u8 val = !!cp->val;
2088 int err;
2089
2090 if (!val) {
2091 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2092
2093 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2094 hci_disable_advertising_sync(hdev);
2095
2096 if (ext_adv_capable(hdev))
2097 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2098 } else {
2099 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2100 }
2101
2102 err = hci_write_le_host_supported_sync(hdev, val, 0);
2103
2104 /* Make sure the controller has a good default for
2105 * advertising data. Restrict the update to when LE
2106 * has actually been enabled. During power on, the
2107 * update in powered_update_hci will take care of it.
2108 */
2109 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2110 if (ext_adv_capable(hdev)) {
2111 int status;
2112
2113 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2114 if (!status)
2115 hci_update_scan_rsp_data_sync(hdev, 0x00);
2116 } else {
2117 hci_update_adv_data_sync(hdev, 0x00);
2118 hci_update_scan_rsp_data_sync(hdev, 0x00);
2119 }
2120
2121 hci_update_passive_scan(hdev);
2122 }
2123
2124 return err;
2125 }
2126
2127 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2128 {
2129 struct mgmt_pending_cmd *cmd = data;
2130 u8 status = mgmt_status(err);
2131 struct sock *sk = cmd->sk;
2132
2133 if (status) {
2134 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, true,
2135 cmd_status_rsp, &status);
2136 return;
2137 }
2138
2139 mgmt_pending_remove(cmd);
2140 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2141 }
2142
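/* Note the length accounting below: cmd->param_len minus the fixed
 * header of struct mgmt_cp_set_mesh leaves the variable-length list of
 * AD type filter bytes. If that list is larger than mesh_ad_types, the
 * array stays zeroed and all advertising packets are forwarded.
 */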
2143 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2144 {
2145 struct mgmt_pending_cmd *cmd = data;
2146 struct mgmt_cp_set_mesh *cp = cmd->param;
2147 size_t len = cmd->param_len;
2148
2149 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2150
2151 if (cp->enable)
2152 hci_dev_set_flag(hdev, HCI_MESH);
2153 else
2154 hci_dev_clear_flag(hdev, HCI_MESH);
2155
2156 len -= sizeof(*cp);
2157
2158 /* If filters don't fit, forward all adv pkts */
2159 if (len <= sizeof(hdev->mesh_ad_types))
2160 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2161
2162 hci_update_passive_scan_sync(hdev);
2163 return 0;
2164 }
2165
2166 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2167 {
2168 struct mgmt_cp_set_mesh *cp = data;
2169 struct mgmt_pending_cmd *cmd;
2170 int err = 0;
2171
2172 bt_dev_dbg(hdev, "sock %p", sk);
2173
2174 if (!lmp_le_capable(hdev) ||
2175 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2176 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2177 MGMT_STATUS_NOT_SUPPORTED);
2178
2179 if (cp->enable != 0x00 && cp->enable != 0x01)
2180 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2181 MGMT_STATUS_INVALID_PARAMS);
2182
2183 hci_dev_lock(hdev);
2184
2185 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2186 if (!cmd)
2187 err = -ENOMEM;
2188 else
2189 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2190 set_mesh_complete);
2191
2192 if (err < 0) {
2193 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2194 MGMT_STATUS_FAILED);
2195
2196 if (cmd)
2197 mgmt_pending_remove(cmd);
2198 }
2199
2200 hci_dev_unlock(hdev);
2201 return err;
2202 }
2203
2204 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2205 {
2206 struct mgmt_mesh_tx *mesh_tx = data;
2207 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2208 unsigned long mesh_send_interval;
2209 u8 mgmt_err = mgmt_status(err);
2210
2211 /* Report any errors here, but don't report completion */
2212
2213 if (mgmt_err) {
2214 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2215 /* Send Complete Error Code for handle */
2216 mesh_send_complete(hdev, mesh_tx, false);
2217 return;
2218 }
2219
2220 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2221 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2222 mesh_send_interval);
2223 }
2224
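/* Mesh transmissions are carried by a short-lived advertising instance.
 * The instance number hdev->le_num_of_adv_sets + 1 lies outside the
 * range used for regular instances, and when all advertising sets are
 * already in use the send is reported as busy. The duration is derived
 * from the requested repeat count times the maximum advertising
 * interval converted to milliseconds.
 */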
2225 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2226 {
2227 struct mgmt_mesh_tx *mesh_tx = data;
2228 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2229 struct adv_info *adv, *next_instance;
2230 u8 instance = hdev->le_num_of_adv_sets + 1;
2231 u16 timeout, duration;
2232 int err = 0;
2233
2234 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2235 return MGMT_STATUS_BUSY;
2236
2237 timeout = 1000;
2238 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2239 adv = hci_add_adv_instance(hdev, instance, 0,
2240 send->adv_data_len, send->adv_data,
2241 0, NULL,
2242 timeout, duration,
2243 HCI_ADV_TX_POWER_NO_PREFERENCE,
2244 hdev->le_adv_min_interval,
2245 hdev->le_adv_max_interval,
2246 mesh_tx->handle);
2247
2248 if (!IS_ERR(adv))
2249 mesh_tx->instance = instance;
2250 else
2251 err = PTR_ERR(adv);
2252
2253 if (hdev->cur_adv_instance == instance) {
2254 /* If the currently advertised instance is being changed then
2255 * cancel the current advertising and schedule the next
2256 * instance. If there is only one instance then the overridden
2257 * advertising data will be visible right away.
2258 */
2259 cancel_adv_timeout(hdev);
2260
2261 next_instance = hci_get_next_instance(hdev, instance);
2262 if (next_instance)
2263 instance = next_instance->instance;
2264 else
2265 instance = 0;
2266 } else if (hdev->adv_instance_timeout) {
2267 /* Immediately advertise the new instance if no other is active,
2268 * or let it be scheduled from the queue if advertising is
2269 * already in progress. */
2270 instance = 0;
2271 }
2272
2273 if (instance)
2274 return hci_schedule_adv_instance_sync(hdev, instance, true);
2275
2276 return err;
2277 }
2278
2279 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2280 {
2281 struct mgmt_rp_mesh_read_features *rp = data;
2282
2283 if (rp->used_handles >= rp->max_handles)
2284 return;
2285
2286 rp->handles[rp->used_handles++] = mesh_tx->handle;
2287 }
2288
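/* Mesh Read Features reply. The response is trimmed so only the handle
 * slots actually in use are sent: with used_handles of the
 * MESH_HANDLES_MAX slots filled, the length passed to
 * mgmt_cmd_complete() is sizeof(rp) - MESH_HANDLES_MAX + used_handles.
 */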
2289 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2290 void *data, u16 len)
2291 {
2292 struct mgmt_rp_mesh_read_features rp;
2293
2294 if (!lmp_le_capable(hdev) ||
2295 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2296 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2297 MGMT_STATUS_NOT_SUPPORTED);
2298
2299 memset(&rp, 0, sizeof(rp));
2300 rp.index = cpu_to_le16(hdev->id);
2301 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2302 rp.max_handles = MESH_HANDLES_MAX;
2303
2304 hci_dev_lock(hdev);
2305
2306 if (rp.max_handles)
2307 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2308
2309 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2310 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2311
2312 hci_dev_unlock(hdev);
2313 return 0;
2314 }
2315
2316 static int send_cancel(struct hci_dev *hdev, void *data)
2317 {
2318 struct mgmt_pending_cmd *cmd = data;
2319 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2320 struct mgmt_mesh_tx *mesh_tx;
2321
2322 if (!cancel->handle) {
2323 do {
2324 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2325
2326 if (mesh_tx)
2327 mesh_send_complete(hdev, mesh_tx, false);
2328 } while (mesh_tx);
2329 } else {
2330 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2331
2332 if (mesh_tx && mesh_tx->sk == cmd->sk)
2333 mesh_send_complete(hdev, mesh_tx, false);
2334 }
2335
2336 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2337 0, NULL, 0);
2338 mgmt_pending_free(cmd);
2339
2340 return 0;
2341 }
2342
2343 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2344 void *data, u16 len)
2345 {
2346 struct mgmt_pending_cmd *cmd;
2347 int err;
2348
2349 if (!lmp_le_capable(hdev) ||
2350 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2351 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2352 MGMT_STATUS_NOT_SUPPORTED);
2353
2354 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2355 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2356 MGMT_STATUS_REJECTED);
2357
2358 hci_dev_lock(hdev);
2359 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2360 if (!cmd)
2361 err = -ENOMEM;
2362 else
2363 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2364
2365 if (err < 0) {
2366 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2367 MGMT_STATUS_FAILED);
2368
2369 if (cmd)
2370 mgmt_pending_free(cmd);
2371 }
2372
2373 hci_dev_unlock(hdev);
2374 return err;
2375 }
2376
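/* Mesh Send. The payload bounds checked below come from LE advertising:
 * len must exceed the fixed MGMT_MESH_SEND_SIZE header and may carry at
 * most 31 further bytes, the maximum legacy advertising data length.
 */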
2377 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2378 {
2379 struct mgmt_mesh_tx *mesh_tx;
2380 struct mgmt_cp_mesh_send *send = data;
2381 struct mgmt_rp_mesh_read_features rp;
2382 bool sending;
2383 int err = 0;
2384
2385 if (!lmp_le_capable(hdev) ||
2386 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2387 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2388 MGMT_STATUS_NOT_SUPPORTED);
2389 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2390 len <= MGMT_MESH_SEND_SIZE ||
2391 len > (MGMT_MESH_SEND_SIZE + 31))
2392 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2393 MGMT_STATUS_REJECTED);
2394
2395 hci_dev_lock(hdev);
2396
2397 memset(&rp, 0, sizeof(rp));
2398 rp.max_handles = MESH_HANDLES_MAX;
2399
2400 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2401
2402 if (rp.max_handles <= rp.used_handles) {
2403 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2404 MGMT_STATUS_BUSY);
2405 goto done;
2406 }
2407
2408 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2409 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2410
2411 if (!mesh_tx)
2412 err = -ENOMEM;
2413 else if (!sending)
2414 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2415 mesh_send_start_complete);
2416
2417 if (err < 0) {
2418 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2419 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2420 MGMT_STATUS_FAILED);
2421
2422 if (mesh_tx) {
2423 if (sending)
2424 mgmt_mesh_remove(mesh_tx);
2425 }
2426 } else {
2427 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2428
2429 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2430 &mesh_tx->handle, 1);
2431 }
2432
2433 done:
2434 hci_dev_unlock(hdev);
2435 return err;
2436 }
2437
2438 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2439 {
2440 struct mgmt_mode *cp = data;
2441 struct mgmt_pending_cmd *cmd;
2442 int err;
2443 u8 val, enabled;
2444
2445 bt_dev_dbg(hdev, "sock %p", sk);
2446
2447 if (!lmp_le_capable(hdev))
2448 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2449 MGMT_STATUS_NOT_SUPPORTED);
2450
2451 if (cp->val != 0x00 && cp->val != 0x01)
2452 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2453 MGMT_STATUS_INVALID_PARAMS);
2454
2455 /* Bluetooth single-mode LE-only controllers, and dual-mode
2456 * controllers configured as LE-only devices, do not allow
2457 * switching LE off. These have either LE enabled explicitly
2458 * or BR/EDR previously switched off.
2459 *
2460 * Trying to enable LE when it is already enabled gracefully
2461 * sends a positive response; trying to disable it, however,
2462 * results in rejection.
2463 */
2464 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2465 if (cp->val == 0x01)
2466 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2467
2468 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2469 MGMT_STATUS_REJECTED);
2470 }
2471
2472 hci_dev_lock(hdev);
2473
2474 val = !!cp->val;
2475 enabled = lmp_host_le_capable(hdev);
2476
2477 if (!hdev_is_powered(hdev) || val == enabled) {
2478 bool changed = false;
2479
2480 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2481 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2482 changed = true;
2483 }
2484
2485 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2486 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2487 changed = true;
2488 }
2489
2490 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2491 if (err < 0)
2492 goto unlock;
2493
2494 if (changed)
2495 err = new_settings(hdev, sk);
2496
2497 goto unlock;
2498 }
2499
2500 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2501 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2502 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2503 MGMT_STATUS_BUSY);
2504 goto unlock;
2505 }
2506
2507 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2508 if (!cmd)
2509 err = -ENOMEM;
2510 else
2511 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2512 set_le_complete);
2513
2514 if (err < 0) {
2515 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2516 MGMT_STATUS_FAILED);
2517
2518 if (cmd)
2519 mgmt_pending_remove(cmd);
2520 }
2521
2522 unlock:
2523 hci_dev_unlock(hdev);
2524 return err;
2525 }
2526
2527 static int send_hci_cmd_sync(struct hci_dev *hdev, void *data)
2528 {
2529 struct mgmt_pending_cmd *cmd = data;
2530 struct mgmt_cp_hci_cmd_sync *cp = cmd->param;
2531 struct sk_buff *skb;
2532
2533 skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cp->opcode),
2534 le16_to_cpu(cp->params_len), cp->params,
2535 cp->event, cp->timeout ?
2536 secs_to_jiffies(cp->timeout) :
2537 HCI_CMD_TIMEOUT);
2538 if (IS_ERR(skb)) {
2539 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2540 mgmt_status(PTR_ERR(skb)));
2541 goto done;
2542 }
2543
2544 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC, 0,
2545 skb->data, skb->len);
2546
2547 kfree_skb(skb);
2548
2549 done:
2550 mgmt_pending_free(cmd);
2551
2552 return 0;
2553 }
2554
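/* HCI Cmd Sync passthrough. The only validation is that the overall
 * message length is self-consistent: len must equal the offset of the
 * variable params[] member plus the params_len declared in the header,
 * e.g. a command with params_len = 2 must arrive in a message of
 * offsetof(struct mgmt_cp_hci_cmd_sync, params) + 2 bytes.
 */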
2555 static int mgmt_hci_cmd_sync(struct sock *sk, struct hci_dev *hdev,
2556 void *data, u16 len)
2557 {
2558 struct mgmt_cp_hci_cmd_sync *cp = data;
2559 struct mgmt_pending_cmd *cmd;
2560 int err;
2561
2562 if (len != (offsetof(struct mgmt_cp_hci_cmd_sync, params) +
2563 le16_to_cpu(cp->params_len)))
2564 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2565 MGMT_STATUS_INVALID_PARAMS);
2566
2567 hci_dev_lock(hdev);
2568 cmd = mgmt_pending_new(sk, MGMT_OP_HCI_CMD_SYNC, hdev, data, len);
2569 if (!cmd)
2570 err = -ENOMEM;
2571 else
2572 err = hci_cmd_sync_queue(hdev, send_hci_cmd_sync, cmd, NULL);
2573
2574 if (err < 0) {
2575 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2576 MGMT_STATUS_FAILED);
2577
2578 if (cmd)
2579 mgmt_pending_free(cmd);
2580 }
2581
2582 hci_dev_unlock(hdev);
2583 return err;
2584 }
2585
2586 /* This is a helper function to test for pending mgmt commands that can
2587 * cause CoD or EIR HCI commands. We can only allow one such pending
2588 * mgmt command at a time since otherwise we cannot easily track what
2589 * the current values are, will be, and based on that calculate if a new
2590 * HCI command needs to be sent and if yes with what value.
2591 */
2592 static bool pending_eir_or_class(struct hci_dev *hdev)
2593 {
2594 struct mgmt_pending_cmd *cmd;
2595
2596 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2597 switch (cmd->opcode) {
2598 case MGMT_OP_ADD_UUID:
2599 case MGMT_OP_REMOVE_UUID:
2600 case MGMT_OP_SET_DEV_CLASS:
2601 case MGMT_OP_SET_POWERED:
2602 return true;
2603 }
2604 }
2605
2606 return false;
2607 }
2608
2609 static const u8 bluetooth_base_uuid[] = {
2610 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2611 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2612 };
2613
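/* bluetooth_base_uuid above is the little-endian encoding of the SIG
 * base UUID 00000000-0000-1000-8000-00805f9b34fb. A UUID whose low 12
 * bytes match the base is a shortened one: bytes 12-15 hold the 16- or
 * 32-bit value. For example, the Heart Rate service
 * 0000180d-0000-1000-8000-00805f9b34fb yields 0x0000180d there, which
 * fits in 16 bits, so get_uuid_size() reports 16.
 */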
2614 static u8 get_uuid_size(const u8 *uuid)
2615 {
2616 u32 val;
2617
2618 if (memcmp(uuid, bluetooth_base_uuid, 12))
2619 return 128;
2620
2621 val = get_unaligned_le32(&uuid[12]);
2622 if (val > 0xffff)
2623 return 32;
2624
2625 return 16;
2626 }
2627
2628 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2629 {
2630 struct mgmt_pending_cmd *cmd = data;
2631
2632 bt_dev_dbg(hdev, "err %d", err);
2633
2634 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
2635 mgmt_status(err), hdev->dev_class, 3);
2636
2637 mgmt_pending_free(cmd);
2638 }
2639
2640 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2641 {
2642 int err;
2643
2644 err = hci_update_class_sync(hdev);
2645 if (err)
2646 return err;
2647
2648 return hci_update_eir_sync(hdev);
2649 }
2650
2651 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2652 {
2653 struct mgmt_cp_add_uuid *cp = data;
2654 struct mgmt_pending_cmd *cmd;
2655 struct bt_uuid *uuid;
2656 int err;
2657
2658 bt_dev_dbg(hdev, "sock %p", sk);
2659
2660 hci_dev_lock(hdev);
2661
2662 if (pending_eir_or_class(hdev)) {
2663 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2664 MGMT_STATUS_BUSY);
2665 goto failed;
2666 }
2667
2668 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2669 if (!uuid) {
2670 err = -ENOMEM;
2671 goto failed;
2672 }
2673
2674 memcpy(uuid->uuid, cp->uuid, 16);
2675 uuid->svc_hint = cp->svc_hint;
2676 uuid->size = get_uuid_size(cp->uuid);
2677
2678 list_add_tail(&uuid->list, &hdev->uuids);
2679
2680 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2681 if (!cmd) {
2682 err = -ENOMEM;
2683 goto failed;
2684 }
2685
2686 /* MGMT_OP_ADD_UUID doesn't require the adapter to be up/running,
2687 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2688 */
2689 err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
2690 mgmt_class_complete);
2691 if (err < 0) {
2692 mgmt_pending_free(cmd);
2693 goto failed;
2694 }
2695
2696 failed:
2697 hci_dev_unlock(hdev);
2698 return err;
2699 }
2700
2701 static bool enable_service_cache(struct hci_dev *hdev)
2702 {
2703 if (!hdev_is_powered(hdev))
2704 return false;
2705
2706 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2707 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2708 CACHE_TIMEOUT);
2709 return true;
2710 }
2711
2712 return false;
2713 }
2714
2715 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2716 {
2717 int err;
2718
2719 err = hci_update_class_sync(hdev);
2720 if (err)
2721 return err;
2722
2723 return hci_update_eir_sync(hdev);
2724 }
2725
2726 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2727 u16 len)
2728 {
2729 struct mgmt_cp_remove_uuid *cp = data;
2730 struct mgmt_pending_cmd *cmd;
2731 struct bt_uuid *match, *tmp;
2732 static const u8 bt_uuid_any[] = {
2733 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2734 };
2735 int err, found;
2736
2737 bt_dev_dbg(hdev, "sock %p", sk);
2738
2739 hci_dev_lock(hdev);
2740
2741 if (pending_eir_or_class(hdev)) {
2742 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2743 MGMT_STATUS_BUSY);
2744 goto unlock;
2745 }
2746
2747 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2748 hci_uuids_clear(hdev);
2749
2750 if (enable_service_cache(hdev)) {
2751 err = mgmt_cmd_complete(sk, hdev->id,
2752 MGMT_OP_REMOVE_UUID,
2753 0, hdev->dev_class, 3);
2754 goto unlock;
2755 }
2756
2757 goto update_class;
2758 }
2759
2760 found = 0;
2761
2762 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2763 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2764 continue;
2765
2766 list_del(&match->list);
2767 kfree(match);
2768 found++;
2769 }
2770
2771 if (found == 0) {
2772 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2773 MGMT_STATUS_INVALID_PARAMS);
2774 goto unlock;
2775 }
2776
2777 update_class:
2778 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2779 if (!cmd) {
2780 err = -ENOMEM;
2781 goto unlock;
2782 }
2783
2784 /* MGMT_OP_REMOVE_UUID doesn't require the adapter to be up/running,
2785 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2786 */
2787 err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
2788 mgmt_class_complete);
2789 if (err < 0)
2790 mgmt_pending_free(cmd);
2791
2792 unlock:
2793 hci_dev_unlock(hdev);
2794 return err;
2795 }
2796
2797 static int set_class_sync(struct hci_dev *hdev, void *data)
2798 {
2799 int err = 0;
2800
2801 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2802 cancel_delayed_work_sync(&hdev->service_cache);
2803 err = hci_update_eir_sync(hdev);
2804 }
2805
2806 if (err)
2807 return err;
2808
2809 return hci_update_class_sync(hdev);
2810 }
2811
2812 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2813 u16 len)
2814 {
2815 struct mgmt_cp_set_dev_class *cp = data;
2816 struct mgmt_pending_cmd *cmd;
2817 int err;
2818
2819 bt_dev_dbg(hdev, "sock %p", sk);
2820
2821 if (!lmp_bredr_capable(hdev))
2822 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2823 MGMT_STATUS_NOT_SUPPORTED);
2824
2825 hci_dev_lock(hdev);
2826
2827 if (pending_eir_or_class(hdev)) {
2828 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2829 MGMT_STATUS_BUSY);
2830 goto unlock;
2831 }
2832
2833 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2834 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2835 MGMT_STATUS_INVALID_PARAMS);
2836 goto unlock;
2837 }
2838
2839 hdev->major_class = cp->major;
2840 hdev->minor_class = cp->minor;
2841
2842 if (!hdev_is_powered(hdev)) {
2843 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2844 hdev->dev_class, 3);
2845 goto unlock;
2846 }
2847
2848 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2849 if (!cmd) {
2850 err = -ENOMEM;
2851 goto unlock;
2852 }
2853
2854 /* MGMT_OP_SET_DEV_CLASS doesn't require the adapter to be up/running,
2855 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2856 */
2857 err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
2858 mgmt_class_complete);
2859 if (err < 0)
2860 mgmt_pending_free(cmd);
2861
2862 unlock:
2863 hci_dev_unlock(hdev);
2864 return err;
2865 }
2866
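/* Load Link Keys. Two independent size checks guard the variable-length
 * key list: key_count may not imply a payload beyond U16_MAX, and len
 * must match struct_size(cp, keys, key_count) exactly. As a sketch of
 * the arithmetic (assuming the packed mgmt structure sizes, with
 * struct mgmt_link_key_info at 25 bytes), a message carrying two keys
 * must have len = sizeof(*cp) + 2 * 25.
 */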
2867 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2868 u16 len)
2869 {
2870 struct mgmt_cp_load_link_keys *cp = data;
2871 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2872 sizeof(struct mgmt_link_key_info));
2873 u16 key_count, expected_len;
2874 bool changed;
2875 int i;
2876
2877 bt_dev_dbg(hdev, "sock %p", sk);
2878
2879 if (!lmp_bredr_capable(hdev))
2880 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2881 MGMT_STATUS_NOT_SUPPORTED);
2882
2883 key_count = __le16_to_cpu(cp->key_count);
2884 if (key_count > max_key_count) {
2885 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2886 key_count);
2887 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2888 MGMT_STATUS_INVALID_PARAMS);
2889 }
2890
2891 expected_len = struct_size(cp, keys, key_count);
2892 if (expected_len != len) {
2893 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2894 expected_len, len);
2895 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2896 MGMT_STATUS_INVALID_PARAMS);
2897 }
2898
2899 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2900 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2901 MGMT_STATUS_INVALID_PARAMS);
2902
2903 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2904 key_count);
2905
2906 hci_dev_lock(hdev);
2907
2908 hci_link_keys_clear(hdev);
2909
2910 if (cp->debug_keys)
2911 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2912 else
2913 changed = hci_dev_test_and_clear_flag(hdev,
2914 HCI_KEEP_DEBUG_KEYS);
2915
2916 if (changed)
2917 new_settings(hdev, NULL);
2918
2919 for (i = 0; i < key_count; i++) {
2920 struct mgmt_link_key_info *key = &cp->keys[i];
2921
2922 if (hci_is_blocked_key(hdev,
2923 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2924 key->val)) {
2925 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2926 &key->addr.bdaddr);
2927 continue;
2928 }
2929
2930 if (key->addr.type != BDADDR_BREDR) {
2931 bt_dev_warn(hdev,
2932 "Invalid link address type %u for %pMR",
2933 key->addr.type, &key->addr.bdaddr);
2934 continue;
2935 }
2936
2937 if (key->type > 0x08) {
2938 bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
2939 key->type, &key->addr.bdaddr);
2940 continue;
2941 }
2942
2943 /* Always ignore debug keys and require a new pairing if
2944 * the user wants to use them.
2945 */
2946 if (key->type == HCI_LK_DEBUG_COMBINATION)
2947 continue;
2948
2949 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2950 key->type, key->pin_len, NULL);
2951 }
2952
2953 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2954
2955 hci_dev_unlock(hdev);
2956
2957 return 0;
2958 }
2959
2960 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2961 u8 addr_type, struct sock *skip_sk)
2962 {
2963 struct mgmt_ev_device_unpaired ev;
2964
2965 bacpy(&ev.addr.bdaddr, bdaddr);
2966 ev.addr.type = addr_type;
2967
2968 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2969 skip_sk);
2970 }
2971
2972 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2973 {
2974 struct mgmt_pending_cmd *cmd = data;
2975 struct mgmt_cp_unpair_device *cp = cmd->param;
2976
2977 if (!err)
2978 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2979
2980 cmd->cmd_complete(cmd, err);
2981 mgmt_pending_free(cmd);
2982 }
2983
2984 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2985 {
2986 struct mgmt_pending_cmd *cmd = data;
2987 struct mgmt_cp_unpair_device *cp = cmd->param;
2988 struct hci_conn *conn;
2989
2990 if (cp->addr.type == BDADDR_BREDR)
2991 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2992 &cp->addr.bdaddr);
2993 else
2994 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2995 le_addr_type(cp->addr.type));
2996
2997 if (!conn)
2998 return 0;
2999
3000 /* Disregard any possible error since the likes of hci_abort_conn_sync
3001 * will clean up the connection no matter the error.
3002 */
3003 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3004
3005 return 0;
3006 }
3007
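/* Unpair Device. For BR/EDR the stored link key is removed; for LE any
 * ongoing SMP pairing is aborted along with its LTK/IRK. The optional
 * disconnect flag decides whether an existing connection is also torn
 * down, in which case the reply is deferred to unpair_device_complete().
 */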
3008 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3009 u16 len)
3010 {
3011 struct mgmt_cp_unpair_device *cp = data;
3012 struct mgmt_rp_unpair_device rp;
3013 struct hci_conn_params *params;
3014 struct mgmt_pending_cmd *cmd;
3015 struct hci_conn *conn;
3016 u8 addr_type;
3017 int err;
3018
3019 memset(&rp, 0, sizeof(rp));
3020 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3021 rp.addr.type = cp->addr.type;
3022
3023 if (!bdaddr_type_is_valid(cp->addr.type))
3024 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3025 MGMT_STATUS_INVALID_PARAMS,
3026 &rp, sizeof(rp));
3027
3028 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
3029 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3030 MGMT_STATUS_INVALID_PARAMS,
3031 &rp, sizeof(rp));
3032
3033 hci_dev_lock(hdev);
3034
3035 if (!hdev_is_powered(hdev)) {
3036 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3037 MGMT_STATUS_NOT_POWERED, &rp,
3038 sizeof(rp));
3039 goto unlock;
3040 }
3041
3042 if (cp->addr.type == BDADDR_BREDR) {
3043 /* If disconnection is requested, then look up the
3044 * connection. If the remote device is connected, it
3045 * will be later used to terminate the link.
3046 *
3047 * Setting it to NULL explicitly will cause no
3048 * termination of the link.
3049 */
3050 if (cp->disconnect)
3051 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3052 &cp->addr.bdaddr);
3053 else
3054 conn = NULL;
3055
3056 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3057 if (err < 0) {
3058 err = mgmt_cmd_complete(sk, hdev->id,
3059 MGMT_OP_UNPAIR_DEVICE,
3060 MGMT_STATUS_NOT_PAIRED, &rp,
3061 sizeof(rp));
3062 goto unlock;
3063 }
3064
3065 goto done;
3066 }
3067
3068 /* LE address type */
3069 addr_type = le_addr_type(cp->addr.type);
3070
3071 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3072 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3073 if (err < 0) {
3074 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3075 MGMT_STATUS_NOT_PAIRED, &rp,
3076 sizeof(rp));
3077 goto unlock;
3078 }
3079
3080 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3081 if (!conn) {
3082 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3083 goto done;
3084 }
3085
3086
3087 /* Defer clearing the connection parameters until the connection
3088 * closes, to give a chance of keeping them if re-pairing happens.
3089 */
3090 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3091
3092 /* Disable auto-connection parameters if present */
3093 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3094 if (params) {
3095 if (params->explicit_connect)
3096 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3097 else
3098 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3099 }
3100
3101 /* If disconnection is not requested, then clear the connection
3102 * variable so that the link is not terminated.
3103 */
3104 if (!cp->disconnect)
3105 conn = NULL;
3106
3107 done:
3108 /* If the connection variable is set, then termination of the
3109 * link is requested.
3110 */
3111 if (!conn) {
3112 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3113 &rp, sizeof(rp));
3114 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3115 goto unlock;
3116 }
3117
3118 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3119 sizeof(*cp));
3120 if (!cmd) {
3121 err = -ENOMEM;
3122 goto unlock;
3123 }
3124
3125 cmd->cmd_complete = addr_cmd_complete;
3126
3127 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3128 unpair_device_complete);
3129 if (err < 0)
3130 mgmt_pending_free(cmd);
3131
3132 unlock:
3133 hci_dev_unlock(hdev);
3134 return err;
3135 }
3136
3137 static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
3138 {
3139 struct mgmt_pending_cmd *cmd = data;
3140
3141 cmd->cmd_complete(cmd, mgmt_status(err));
3142 mgmt_pending_free(cmd);
3143 }
3144
3145 static int disconnect_sync(struct hci_dev *hdev, void *data)
3146 {
3147 struct mgmt_pending_cmd *cmd = data;
3148 struct mgmt_cp_disconnect *cp = cmd->param;
3149 struct hci_conn *conn;
3150
3151 if (cp->addr.type == BDADDR_BREDR)
3152 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3153 &cp->addr.bdaddr);
3154 else
3155 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3156 le_addr_type(cp->addr.type));
3157
3158 if (!conn)
3159 return -ENOTCONN;
3160
3161 /* Disregard any possible error since the likes of hci_abort_conn_sync
3162 * will clean up the connection no matter the error.
3163 */
3164 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3165
3166 return 0;
3167 }
3168
3169 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3170 u16 len)
3171 {
3172 struct mgmt_cp_disconnect *cp = data;
3173 struct mgmt_rp_disconnect rp;
3174 struct mgmt_pending_cmd *cmd;
3175 int err;
3176
3177 bt_dev_dbg(hdev, "sock %p", sk);
3178
3179 memset(&rp, 0, sizeof(rp));
3180 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3181 rp.addr.type = cp->addr.type;
3182
3183 if (!bdaddr_type_is_valid(cp->addr.type))
3184 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3185 MGMT_STATUS_INVALID_PARAMS,
3186 &rp, sizeof(rp));
3187
3188 hci_dev_lock(hdev);
3189
3190 if (!test_bit(HCI_UP, &hdev->flags)) {
3191 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3192 MGMT_STATUS_NOT_POWERED, &rp,
3193 sizeof(rp));
3194 goto failed;
3195 }
3196
3197 cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3198 if (!cmd) {
3199 err = -ENOMEM;
3200 goto failed;
3201 }
3202
3203 cmd->cmd_complete = generic_cmd_complete;
3204
3205 err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
3206 disconnect_complete);
3207 if (err < 0)
3208 mgmt_pending_free(cmd);
3209
3210 failed:
3211 hci_dev_unlock(hdev);
3212 return err;
3213 }
3214
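/* Map an HCI link type plus address type onto the mgmt bdaddr types:
 * CIS/BIS/LE links become BDADDR_LE_PUBLIC or BDADDR_LE_RANDOM, and
 * everything else falls back to BDADDR_BREDR.
 */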
3215 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3216 {
3217 switch (link_type) {
3218 case CIS_LINK:
3219 case BIS_LINK:
3220 case LE_LINK:
3221 switch (addr_type) {
3222 case ADDR_LE_DEV_PUBLIC:
3223 return BDADDR_LE_PUBLIC;
3224
3225 default:
3226 /* Fallback to LE Random address type */
3227 return BDADDR_LE_RANDOM;
3228 }
3229
3230 default:
3231 /* Fallback to BR/EDR type */
3232 return BDADDR_BREDR;
3233 }
3234 }
3235
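/* Get Connections uses two passes over the connection hash: one to
 * count MGMT_CONNECTED links and size the reply, one to fill it in.
 * SCO/eSCO links are filtered in the second pass, so the final length
 * is recalculated from the number of entries actually written.
 */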
3236 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3237 u16 data_len)
3238 {
3239 struct mgmt_rp_get_connections *rp;
3240 struct hci_conn *c;
3241 int err;
3242 u16 i;
3243
3244 bt_dev_dbg(hdev, "sock %p", sk);
3245
3246 hci_dev_lock(hdev);
3247
3248 if (!hdev_is_powered(hdev)) {
3249 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3250 MGMT_STATUS_NOT_POWERED);
3251 goto unlock;
3252 }
3253
3254 i = 0;
3255 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3256 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3257 i++;
3258 }
3259
3260 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3261 if (!rp) {
3262 err = -ENOMEM;
3263 goto unlock;
3264 }
3265
3266 i = 0;
3267 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3268 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3269 continue;
3270 bacpy(&rp->addr[i].bdaddr, &c->dst);
3271 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3272 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3273 continue;
3274 i++;
3275 }
3276
3277 rp->conn_count = cpu_to_le16(i);
3278
3279 /* Recalculate length in case of filtered SCO connections, etc */
3280 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3281 struct_size(rp, addr, i));
3282
3283 kfree(rp);
3284
3285 unlock:
3286 hci_dev_unlock(hdev);
3287 return err;
3288 }
3289
3290 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3291 struct mgmt_cp_pin_code_neg_reply *cp)
3292 {
3293 struct mgmt_pending_cmd *cmd;
3294 int err;
3295
3296 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3297 sizeof(*cp));
3298 if (!cmd)
3299 return -ENOMEM;
3300
3301 cmd->cmd_complete = addr_cmd_complete;
3302
3303 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3304 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3305 if (err < 0)
3306 mgmt_pending_remove(cmd);
3307
3308 return err;
3309 }
3310
3311 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3312 u16 len)
3313 {
3314 struct hci_conn *conn;
3315 struct mgmt_cp_pin_code_reply *cp = data;
3316 struct hci_cp_pin_code_reply reply;
3317 struct mgmt_pending_cmd *cmd;
3318 int err;
3319
3320 bt_dev_dbg(hdev, "sock %p", sk);
3321
3322 hci_dev_lock(hdev);
3323
3324 if (!hdev_is_powered(hdev)) {
3325 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3326 MGMT_STATUS_NOT_POWERED);
3327 goto failed;
3328 }
3329
3330 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3331 if (!conn) {
3332 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3333 MGMT_STATUS_NOT_CONNECTED);
3334 goto failed;
3335 }
3336
3337 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3338 struct mgmt_cp_pin_code_neg_reply ncp;
3339
3340 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3341
3342 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3343
3344 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3345 if (err >= 0)
3346 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3347 MGMT_STATUS_INVALID_PARAMS);
3348
3349 goto failed;
3350 }
3351
3352 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3353 if (!cmd) {
3354 err = -ENOMEM;
3355 goto failed;
3356 }
3357
3358 cmd->cmd_complete = addr_cmd_complete;
3359
3360 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3361 reply.pin_len = cp->pin_len;
3362 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3363
3364 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3365 if (err < 0)
3366 mgmt_pending_remove(cmd);
3367
3368 failed:
3369 hci_dev_unlock(hdev);
3370 return err;
3371 }
3372
3373 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3374 u16 len)
3375 {
3376 struct mgmt_cp_set_io_capability *cp = data;
3377
3378 bt_dev_dbg(hdev, "sock %p", sk);
3379
3380 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3381 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3382 MGMT_STATUS_INVALID_PARAMS);
3383
3384 hci_dev_lock(hdev);
3385
3386 hdev->io_capability = cp->io_capability;
3387
3388 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3389
3390 hci_dev_unlock(hdev);
3391
3392 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3393 NULL, 0);
3394 }
3395
3396 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3397 {
3398 struct hci_dev *hdev = conn->hdev;
3399 struct mgmt_pending_cmd *cmd;
3400
3401 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3402 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3403 continue;
3404
3405 if (cmd->user_data != conn)
3406 continue;
3407
3408 return cmd;
3409 }
3410
3411 return NULL;
3412 }
3413
3414 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3415 {
3416 struct mgmt_rp_pair_device rp;
3417 struct hci_conn *conn = cmd->user_data;
3418 int err;
3419
3420 bacpy(&rp.addr.bdaddr, &conn->dst);
3421 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3422
3423 err = mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_PAIR_DEVICE,
3424 status, &rp, sizeof(rp));
3425
3426 /* So we don't get further callbacks for this connection */
3427 conn->connect_cfm_cb = NULL;
3428 conn->security_cfm_cb = NULL;
3429 conn->disconn_cfm_cb = NULL;
3430
3431 hci_conn_drop(conn);
3432
3433 /* The device is paired so there is no need to remove
3434 * its connection parameters anymore.
3435 */
3436 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3437
3438 hci_conn_put(conn);
3439
3440 return err;
3441 }
3442
3443 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3444 {
3445 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3446 struct mgmt_pending_cmd *cmd;
3447
3448 cmd = find_pairing(conn);
3449 if (cmd) {
3450 cmd->cmd_complete(cmd, status);
3451 mgmt_pending_remove(cmd);
3452 }
3453 }
3454
3455 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3456 {
3457 struct mgmt_pending_cmd *cmd;
3458
3459 BT_DBG("status %u", status);
3460
3461 cmd = find_pairing(conn);
3462 if (!cmd) {
3463 BT_DBG("Unable to find a pending command");
3464 return;
3465 }
3466
3467 cmd->cmd_complete(cmd, mgmt_status(status));
3468 mgmt_pending_remove(cmd);
3469 }
3470
3471 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3472 {
3473 struct mgmt_pending_cmd *cmd;
3474
3475 BT_DBG("status %u", status);
3476
3477 if (!status)
3478 return;
3479
3480 cmd = find_pairing(conn);
3481 if (!cmd) {
3482 BT_DBG("Unable to find a pending command");
3483 return;
3484 }
3485
3486 cmd->cmd_complete(cmd, mgmt_status(status));
3487 mgmt_pending_remove(cmd);
3488 }
3489
3490 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3491 u16 len)
3492 {
3493 struct mgmt_cp_pair_device *cp = data;
3494 struct mgmt_rp_pair_device rp;
3495 struct mgmt_pending_cmd *cmd;
3496 u8 sec_level, auth_type;
3497 struct hci_conn *conn;
3498 int err;
3499
3500 bt_dev_dbg(hdev, "sock %p", sk);
3501
3502 memset(&rp, 0, sizeof(rp));
3503 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3504 rp.addr.type = cp->addr.type;
3505
3506 if (!bdaddr_type_is_valid(cp->addr.type))
3507 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3508 MGMT_STATUS_INVALID_PARAMS,
3509 &rp, sizeof(rp));
3510
3511 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3512 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3513 MGMT_STATUS_INVALID_PARAMS,
3514 &rp, sizeof(rp));
3515
3516 hci_dev_lock(hdev);
3517
3518 if (!hdev_is_powered(hdev)) {
3519 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3520 MGMT_STATUS_NOT_POWERED, &rp,
3521 sizeof(rp));
3522 goto unlock;
3523 }
3524
3525 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3526 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3527 MGMT_STATUS_ALREADY_PAIRED, &rp,
3528 sizeof(rp));
3529 goto unlock;
3530 }
3531
3532 sec_level = BT_SECURITY_MEDIUM;
3533 auth_type = HCI_AT_DEDICATED_BONDING;
3534
3535 if (cp->addr.type == BDADDR_BREDR) {
3536 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3537 auth_type, CONN_REASON_PAIR_DEVICE,
3538 HCI_ACL_CONN_TIMEOUT);
3539 } else {
3540 u8 addr_type = le_addr_type(cp->addr.type);
3541 struct hci_conn_params *p;
3542
3543 /* When pairing a new device, it is expected to remember
3544 * this device for future connections. Adding the connection
3545 * parameter information ahead of time allows tracking
3546 * of the peripheral preferred values and will speed up any
3547 * further connection establishment.
3548 *
3549 * If connection parameters already exist, then they
3550 * will be kept and this function does nothing.
3551 */
3552 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3553 if (!p) {
3554 err = -EIO;
3555 goto unlock;
3556 }
3557
3558 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3559 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3560
3561 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3562 sec_level, HCI_LE_CONN_TIMEOUT,
3563 CONN_REASON_PAIR_DEVICE);
3564 }
3565
3566 if (IS_ERR(conn)) {
3567 int status;
3568
3569 if (PTR_ERR(conn) == -EBUSY)
3570 status = MGMT_STATUS_BUSY;
3571 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3572 status = MGMT_STATUS_NOT_SUPPORTED;
3573 else if (PTR_ERR(conn) == -ECONNREFUSED)
3574 status = MGMT_STATUS_REJECTED;
3575 else
3576 status = MGMT_STATUS_CONNECT_FAILED;
3577
3578 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3579 status, &rp, sizeof(rp));
3580 goto unlock;
3581 }
3582
3583 if (conn->connect_cfm_cb) {
3584 hci_conn_drop(conn);
3585 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3586 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3587 goto unlock;
3588 }
3589
3590 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3591 if (!cmd) {
3592 err = -ENOMEM;
3593 hci_conn_drop(conn);
3594 goto unlock;
3595 }
3596
3597 cmd->cmd_complete = pairing_complete;
3598
3599 /* For LE, just connecting isn't a proof that the pairing finished */
3600 if (cp->addr.type == BDADDR_BREDR) {
3601 conn->connect_cfm_cb = pairing_complete_cb;
3602 conn->security_cfm_cb = pairing_complete_cb;
3603 conn->disconn_cfm_cb = pairing_complete_cb;
3604 } else {
3605 conn->connect_cfm_cb = le_pairing_complete_cb;
3606 conn->security_cfm_cb = le_pairing_complete_cb;
3607 conn->disconn_cfm_cb = le_pairing_complete_cb;
3608 }
3609
3610 conn->io_capability = cp->io_cap;
3611 cmd->user_data = hci_conn_get(conn);
3612
3613 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3614 hci_conn_security(conn, sec_level, auth_type, true)) {
3615 cmd->cmd_complete(cmd, 0);
3616 mgmt_pending_remove(cmd);
3617 }
3618
3619 err = 0;
3620
3621 unlock:
3622 hci_dev_unlock(hdev);
3623 return err;
3624 }
3625
3626 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3627 u16 len)
3628 {
3629 struct mgmt_addr_info *addr = data;
3630 struct mgmt_pending_cmd *cmd;
3631 struct hci_conn *conn;
3632 int err;
3633
3634 bt_dev_dbg(hdev, "sock %p", sk);
3635
3636 hci_dev_lock(hdev);
3637
3638 if (!hdev_is_powered(hdev)) {
3639 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3640 MGMT_STATUS_NOT_POWERED);
3641 goto unlock;
3642 }
3643
3644 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3645 if (!cmd) {
3646 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3647 MGMT_STATUS_INVALID_PARAMS);
3648 goto unlock;
3649 }
3650
3651 conn = cmd->user_data;
3652
3653 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3654 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3655 MGMT_STATUS_INVALID_PARAMS);
3656 goto unlock;
3657 }
3658
3659 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3660 mgmt_pending_remove(cmd);
3661
3662 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3663 addr, sizeof(*addr));
3664
3665 /* Since user doesn't want to proceed with the connection, abort any
3666 * ongoing pairing and then terminate the link if it was created
3667 * because of the pair device action.
3668 */
3669 if (addr->type == BDADDR_BREDR)
3670 hci_remove_link_key(hdev, &addr->bdaddr);
3671 else
3672 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3673 le_addr_type(addr->type));
3674
3675 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3676 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3677
3678 unlock:
3679 hci_dev_unlock(hdev);
3680 return err;
3681 }
3682
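/* Common helper behind the PIN code, user confirmation and passkey
 * replies. LE pairing responses are routed through SMP and answered
 * immediately; BR/EDR responses are forwarded as the given HCI opcode,
 * with the passkey appended only for HCI_OP_USER_PASSKEY_REPLY.
 */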
3683 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3684 struct mgmt_addr_info *addr, u16 mgmt_op,
3685 u16 hci_op, __le32 passkey)
3686 {
3687 struct mgmt_pending_cmd *cmd;
3688 struct hci_conn *conn;
3689 int err;
3690
3691 hci_dev_lock(hdev);
3692
3693 if (!hdev_is_powered(hdev)) {
3694 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3695 MGMT_STATUS_NOT_POWERED, addr,
3696 sizeof(*addr));
3697 goto done;
3698 }
3699
3700 if (addr->type == BDADDR_BREDR)
3701 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3702 else
3703 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3704 le_addr_type(addr->type));
3705
3706 if (!conn) {
3707 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3708 MGMT_STATUS_NOT_CONNECTED, addr,
3709 sizeof(*addr));
3710 goto done;
3711 }
3712
3713 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3714 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3715 if (!err)
3716 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3717 MGMT_STATUS_SUCCESS, addr,
3718 sizeof(*addr));
3719 else
3720 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3721 MGMT_STATUS_FAILED, addr,
3722 sizeof(*addr));
3723
3724 goto done;
3725 }
3726
3727 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3728 if (!cmd) {
3729 err = -ENOMEM;
3730 goto done;
3731 }
3732
3733 cmd->cmd_complete = addr_cmd_complete;
3734
3735 /* Continue with pairing via HCI */
3736 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3737 struct hci_cp_user_passkey_reply cp;
3738
3739 bacpy(&cp.bdaddr, &addr->bdaddr);
3740 cp.passkey = passkey;
3741 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3742 } else
3743 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3744 &addr->bdaddr);
3745
3746 if (err < 0)
3747 mgmt_pending_remove(cmd);
3748
3749 done:
3750 hci_dev_unlock(hdev);
3751 return err;
3752 }
3753
3754 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3755 void *data, u16 len)
3756 {
3757 struct mgmt_cp_pin_code_neg_reply *cp = data;
3758
3759 bt_dev_dbg(hdev, "sock %p", sk);
3760
3761 return user_pairing_resp(sk, hdev, &cp->addr,
3762 MGMT_OP_PIN_CODE_NEG_REPLY,
3763 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3764 }
3765
3766 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3767 u16 len)
3768 {
3769 struct mgmt_cp_user_confirm_reply *cp = data;
3770
3771 bt_dev_dbg(hdev, "sock %p", sk);
3772
3773 if (len != sizeof(*cp))
3774 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3775 MGMT_STATUS_INVALID_PARAMS);
3776
3777 return user_pairing_resp(sk, hdev, &cp->addr,
3778 MGMT_OP_USER_CONFIRM_REPLY,
3779 HCI_OP_USER_CONFIRM_REPLY, 0);
3780 }
3781
3782 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3783 void *data, u16 len)
3784 {
3785 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3786
3787 bt_dev_dbg(hdev, "sock %p", sk);
3788
3789 return user_pairing_resp(sk, hdev, &cp->addr,
3790 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3791 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3792 }
3793
user_passkey_reply(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)3794 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3795 u16 len)
3796 {
3797 struct mgmt_cp_user_passkey_reply *cp = data;
3798
3799 bt_dev_dbg(hdev, "sock %p", sk);
3800
3801 return user_pairing_resp(sk, hdev, &cp->addr,
3802 MGMT_OP_USER_PASSKEY_REPLY,
3803 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3804 }
3805
user_passkey_neg_reply(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)3806 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3807 void *data, u16 len)
3808 {
3809 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3810
3811 bt_dev_dbg(hdev, "sock %p", sk);
3812
3813 return user_pairing_resp(sk, hdev, &cp->addr,
3814 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3815 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3816 }
3817
adv_expire_sync(struct hci_dev * hdev,u32 flags)3818 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3819 {
3820 struct adv_info *adv_instance;
3821
3822 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3823 if (!adv_instance)
3824 return 0;
3825
3826 /* stop if current instance doesn't need to be changed */
3827 if (!(adv_instance->flags & flags))
3828 return 0;
3829
3830 cancel_adv_timeout(hdev);
3831
3832 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3833 if (!adv_instance)
3834 return 0;
3835
3836 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3837
3838 return 0;
3839 }
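
/* For example, when the local name changes while the current instance
 * advertises it (MGMT_ADV_FLAG_LOCAL_NAME), the instance is expired and
 * the next instance is scheduled immediately.
 */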

static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}

static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}

static int set_name_sync(struct hci_dev *hdev, void *data)
{
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}

static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}

static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != appearance) {
		hdev->appearance = appearance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}

static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_configuration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}

int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}

static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}

static int set_default_phy_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
	struct hci_cp_le_set_default_phy cp_phy;
	u32 selected_phys = __le32_to_cpu(cp->selected_phys);

	memset(&cp_phy, 0, sizeof(cp_phy));

	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
				  sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);

	return 0;
}
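
/* Worked example: selected_phys = MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_1M_RX
 * yields all_phys = 0x00 (both directions specified) and
 * tx_phys = rx_phys = HCI_LE_SET_PHY_1M. If no LE TX bit is selected,
 * all_phys bit 0 tells the controller the host has no TX preference.
 */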

static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}
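
	/* For example, key_count = 2 gives expected_len =
	 * sizeof(*keys) + 2 * sizeof(struct mgmt_blocked_key_info), which
	 * is what struct_size() computes (with overflow checking).
	 */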

	hci_dev_lock(hdev);

	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				 err, NULL, 0);
}

static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * them from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}

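/* The UUID byte arrays below store each 128-bit value in little-endian
 * order, i.e. the bytes of the textual UUID reversed: for instance
 * d4992530-b9ec-469f-ab01-6c481c47da1c becomes { 0x1c, 0xda, ..., 0xd4 }.
 */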
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};

static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_exp_features_info *rp;
	size_t len;
	u16 idx = 0;
	u32 flags;
	int status;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enough space for 7 features */
	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
	rp = kzalloc(len, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (IS_ENABLED(CONFIG_BT_LE)) {
		flags = iso_enabled() ? BIT(0) : 0;
		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && lmp_le_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				   MGMT_OP_READ_EXP_FEATURES_INFO,
				   0, rp, sizeof(*rp) + (20 * idx));

	kfree(rp);
	return status;
}
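
/* Each returned feature entry is 20 bytes (a 16 byte UUID followed by a
 * 32-bit flags word), hence the sizeof(*rp) + (20 * idx) above.
 */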

static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
			       bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, uuid, 16);
	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}

#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
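
/* For example, EXP_FEAT(debug_uuid, set_debug_func) expands to
 * { .uuid = debug_uuid, .set_func = set_debug_func }.
 */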

/* The zero key uuid is special. Multiple exp features are set through it. */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}

#ifdef CONFIG_BT_FEATURE_DEBUG
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif

static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_MESH_EXPERIMENTAL);
	} else {
		hci_dev_clear_flag(hdev, HCI_MESH);
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_MESH_EXPERIMENTAL);
	}

	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);

	return err;
}

static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}

static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	if (!hdev->get_data_path_id) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);

	return err;
}

static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));

	if (!hci_dev_le_state_simultaneous(hdev)) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);

	return err;
}

#ifdef CONFIG_BT_LE
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed = false;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = cp->param[0] ? true : false;
	if (val)
		err = iso_init();
	else
		err = iso_exit();

	if (!err)
		changed = true;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, val, sk);

	return err;
}
#endif

static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};

static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	size_t i = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	for (i = 0; exp_features[i].uuid; i++) {
		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
			return exp_features[i].set_func(sk, hdev, cp, data_len);
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}

static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	supported_flags = hdev->conn_flags;

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (!params)
			goto done;

		current_flags = params->flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				 &rp, sizeof(rp));
}

static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = bdaddr_type;
	ev.supported_flags = cpu_to_le32(supported_flags);
	ev.current_flags = cpu_to_le32(current_flags);

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}

static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* conn_flags can change while unlocked, so the check is repeated
	 * against a fresh copy under hci_dev_lock() below.
	 */
	supported_flags = hdev->conn_flags;

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}
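
	/* The check above rejects any flag outside supported_flags: e.g.
	 * supported_flags = 0x07 and current_flags = 0x09 gives
	 * (0x07 | 0x09) = 0x0f != 0x07 (bit 3 unsupported). It is
	 * equivalent to (current_flags & ~supported_flags) != 0.
	 */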

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
		goto unlock;
	}

	supported_flags = hdev->conn_flags;

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	WRITE_ONCE(params->flags, current_flags);
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}

static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
				   u16 handle)
{
	struct mgmt_ev_adv_monitor_added ev;

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
}

static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
				     __le16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;

	ev.monitor_handle = handle;

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}

static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}

static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	return hci_add_adv_monitor(hdev, monitor);
}

static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	if (status)
		goto unlock;

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}

static void parse_adv_monitor_rssi(struct adv_monitor *m,
				   struct mgmt_adv_rssi_thresholds *rssi)
{
	if (rssi) {
		m->rssi.low_threshold = rssi->low_threshold;
		m->rssi.low_threshold_timeout =
		    __le16_to_cpu(rssi->low_threshold_timeout);
		m->rssi.high_threshold = rssi->high_threshold;
		m->rssi.high_threshold_timeout =
		    __le16_to_cpu(rssi->high_threshold_timeout);
		m->rssi.sampling_period = rssi->sampling_period;
	} else {
		/* Default values. These numbers are the least constricting
		 * parameters for the MSFT API to work, so it behaves as if
		 * there were no RSSI parameters to consider. May need to be
		 * changed if other APIs are to be supported.
		 */
		m->rssi.low_threshold = -127;
		m->rssi.low_threshold_timeout = 60;
		m->rssi.high_threshold = -127;
		m->rssi.high_threshold_timeout = 0;
		m->rssi.sampling_period = 0;
	}
}

static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
				    struct mgmt_adv_pattern *patterns)
{
	u8 offset = 0, length = 0;
	struct adv_pattern *p = NULL;
	int i;

	for (i = 0; i < pattern_count; i++) {
		offset = patterns[i].offset;
		length = patterns[i].length;
		if (offset >= HCI_MAX_EXT_AD_LENGTH ||
		    length > HCI_MAX_EXT_AD_LENGTH ||
		    (offset + length) > HCI_MAX_EXT_AD_LENGTH)
			return MGMT_STATUS_INVALID_PARAMS;

		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return MGMT_STATUS_NO_RESOURCES;

		p->ad_type = patterns[i].ad_type;
		p->offset = patterns[i].offset;
		p->length = patterns[i].length;
		memcpy(p->value, patterns[i].value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	return MGMT_STATUS_SUCCESS;
}
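
/* For example, offset = 2 and length = 3 matches advertising data bytes
 * 2..4; offset + length may not exceed HCI_MAX_EXT_AD_LENGTH (251 bytes
 * for extended advertising in current kernels).
 */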

static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}

static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}

static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp;

	if (status == -ECANCELED)
		return;

	hci_dev_lock(hdev);

	cp = cmd->param;

	rp.monitor_handle = cp->monitor_handle;

	if (!status) {
		mgmt_adv_monitor_removed(cmd->sk, hdev, cp->monitor_handle);
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}

static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
	u16 handle = __le16_to_cpu(cp->monitor_handle);

	if (!handle)
		return hci_remove_all_adv_monitor(hdev);

	return hci_remove_single_adv_monitor(hdev, handle);
}
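
/* For example, a Remove Advertisement Monitor command with
 * monitor_handle 0x0000 removes every registered monitor at once.
 */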
5417
remove_adv_monitor(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)5418 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5419 void *data, u16 len)
5420 {
5421 struct mgmt_pending_cmd *cmd;
5422 int err, status;
5423
5424 hci_dev_lock(hdev);
5425
5426 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5427 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5428 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5429 status = MGMT_STATUS_BUSY;
5430 goto unlock;
5431 }
5432
5433 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5434 if (!cmd) {
5435 status = MGMT_STATUS_NO_RESOURCES;
5436 goto unlock;
5437 }
5438
5439 err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
5440 mgmt_remove_adv_monitor_complete);
5441
5442 if (err) {
5443 mgmt_pending_free(cmd);
5444
5445 if (err == -ENOMEM)
5446 status = MGMT_STATUS_NO_RESOURCES;
5447 else
5448 status = MGMT_STATUS_FAILED;
5449
5450 goto unlock;
5451 }
5452
5453 hci_dev_unlock(hdev);
5454
5455 return 0;
5456
5457 unlock:
5458 hci_dev_unlock(hdev);
5459 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5460 status);
5461 }
5462
read_local_oob_data_complete(struct hci_dev * hdev,void * data,int err)5463 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5464 {
5465 struct mgmt_rp_read_local_oob_data mgmt_rp;
5466 size_t rp_size = sizeof(mgmt_rp);
5467 struct mgmt_pending_cmd *cmd = data;
5468 struct sk_buff *skb = cmd->skb;
5469 u8 status = mgmt_status(err);
5470
5471 if (!status) {
5472 if (!skb)
5473 status = MGMT_STATUS_FAILED;
5474 else if (IS_ERR(skb))
5475 status = mgmt_status(PTR_ERR(skb));
5476 else
5477 status = mgmt_status(skb->data[0]);
5478 }
5479
5480 bt_dev_dbg(hdev, "status %d", status);
5481
5482 if (status) {
5483 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5484 goto remove;
5485 }
5486
5487 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5488
5489 if (!bredr_sc_enabled(hdev)) {
5490 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5491
5492 if (skb->len < sizeof(*rp)) {
5493 mgmt_cmd_status(cmd->sk, hdev->id,
5494 MGMT_OP_READ_LOCAL_OOB_DATA,
5495 MGMT_STATUS_FAILED);
5496 goto remove;
5497 }
5498
5499 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5500 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5501
5502 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5503 } else {
5504 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5505
5506 if (skb->len < sizeof(*rp)) {
5507 mgmt_cmd_status(cmd->sk, hdev->id,
5508 MGMT_OP_READ_LOCAL_OOB_DATA,
5509 MGMT_STATUS_FAILED);
5510 goto remove;
5511 }
5512
5513 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5514 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5515
5516 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5517 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5518 }
5519
5520 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5521 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5522
5523 remove:
5524 if (skb && !IS_ERR(skb))
5525 kfree_skb(skb);
5526
5527 mgmt_pending_free(cmd);
5528 }
5529
read_local_oob_data_sync(struct hci_dev * hdev,void * data)5530 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5531 {
5532 struct mgmt_pending_cmd *cmd = data;
5533
5534 if (bredr_sc_enabled(hdev))
5535 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5536 else
5537 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5538
5539 if (IS_ERR(cmd->skb))
5540 return PTR_ERR(cmd->skb);
5541 else
5542 return 0;
5543 }
5544
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

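/* Worked example of the length dispatch above (sizes for illustration,
 * derived from the mgmt structures): struct mgmt_addr_info is 7 bytes
 * (6-byte bdaddr plus type), so the legacy form carrying only the P-192
 * hash and randomizer is 7 + 16 + 16 = 39 bytes, while the extended form
 * carrying both P-192 and P-256 values is 7 + 4 * 16 = 71 bytes. Any
 * other length is rejected with MGMT_STATUS_INVALID_PARAMS.
 */
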
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
		goto done;
	}

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}

static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
				    uint8_t *mgmt_status)
{
	switch (type) {
	case DISCOV_TYPE_LE:
		*mgmt_status = mgmt_le_support(hdev);
		if (*mgmt_status)
			return false;
		break;
	case DISCOV_TYPE_INTERLEAVED:
		*mgmt_status = mgmt_le_support(hdev);
		if (*mgmt_status)
			return false;
		fallthrough;
	case DISCOV_TYPE_BREDR:
		*mgmt_status = mgmt_bredr_support(hdev);
		if (*mgmt_status)
			return false;
		break;
	default:
		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}

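/* The mapping enforced above, summarised:
 *
 *	DISCOV_TYPE_LE          requires LE support
 *	DISCOV_TYPE_BREDR       requires BR/EDR support
 *	DISCOV_TYPE_INTERLEAVED requires LE, then falls through to the
 *	                        BR/EDR check, so it needs both
 *
 * On failure *mgmt_status carries the reason; unknown types yield
 * MGMT_STATUS_INVALID_PARAMS.
 */
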
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err == -ECANCELED)
		return;

	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
				DISCOVERY_FINDING);
}

static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}

static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}

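/* Illustrative userspace sketch (not kernel code; error handling and the
 * exact header definitions are elided): handlers like start_discovery()
 * are reached via the HCI control channel, where each command is a
 * struct mgmt_hdr (opcode, index, len; all little endian) followed by
 * its parameters, here the single address-type bitmask byte:
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *	struct sockaddr_hci a = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = HCI_DEV_NONE,
 *		.hci_channel = HCI_CHANNEL_CONTROL,
 *	};
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 *
 *	struct {
 *		struct mgmt_hdr hdr;
 *		struct mgmt_cp_start_discovery cp;
 *	} __packed req = {
 *		.hdr.opcode = htobs(MGMT_OP_START_DISCOVERY),
 *		.hdr.index  = htobs(0),	            // hci0
 *		.hdr.len    = htobs(sizeof(req.cp)),
 *		// bit 1 = LE Public, bit 2 = LE Random: LE-only discovery
 *		.cp.type    = (1 << BDADDR_LE_PUBLIC) | (1 << BDADDR_LE_RANDOM),
 *	};
 *	write(fd, &req, sizeof(req));
 */
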
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}

static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}

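/* Worked example of the length check above: sizeof(*cp) covers the fixed
 * part of the request (type, rssi, uuid_count), so filtering on three
 * 128-bit UUIDs requires a payload of exactly sizeof(*cp) + 3 * 16 bytes.
 * Anything else is rejected before any discovery state is touched.
 */
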
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}

static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}

static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		status = MGMT_STATUS_FAILED;
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}

static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}

static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}

static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	int err;
	__u16 source;

	bt_dev_dbg(hdev, "sock %p", sk);

	source = __le16_to_cpu(cp->source);

	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);

	hci_dev_unlock(hdev);

	return err;
}

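/* For reference, the source values accepted above (which is why anything
 * greater than 0x0002 is rejected): 0x0000 disables the Device ID,
 * 0x0001 marks the vendor ID as Bluetooth SIG assigned and 0x0002 as USB
 * Implementer's Forum assigned. The EIR update is queued without a
 * completion callback since there is nothing to report back.
 */
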
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising %d", err);
	else
		bt_dev_dbg(hdev, "status %d", err);
}

static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, true,
				     cmd_status_rsp, &status);
		return;
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, true, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}

static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}

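/* The cp->val encoding handled above, for illustration: 0x00 disables
 * advertising, 0x01 enables it and 0x02 enables it as connectable
 * (tracked via HCI_ADVERTISING_CONNECTABLE). Controllers with extended
 * advertising take the hci_start_ext_adv_sync() path, legacy ones get
 * their advertising and scan response data updated explicitly.
 */
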
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

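/* Worked example of the address check above: bdaddr_t is stored little
 * endian, so b[5] is the most significant byte. A static random address
 * must have its two top bits set, i.e. lie in the range
 * C0:00:00:00:00:00 to FF:FF:FF:FF:FF:FF (FF:FF:FF:FF:FF:FF itself is
 * rejected earlier as BDADDR_NONE). For C4:12:34:56:78:9A the test is
 * (0xc4 & 0xc0) == 0xc0, which passes; 42:... would be rejected.
 */
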
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED)
		hci_update_passive_scan(hdev);

	hci_dev_unlock(hdev);

	return err;
}

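/* The 0x0004-0x4000 bounds above follow from LE scan parameters being
 * expressed in units of 0.625 ms: 0x0004 * 0.625 ms = 2.5 ms at the low
 * end and 0x4000 * 0.625 ms = 10.24 s at the high end. The window is
 * additionally capped at the interval, since a controller cannot scan
 * for longer than the interval it scans in.
 */
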
static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(err));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}

static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	return hci_write_fast_connectable_sync(hdev, cp->val);
}

static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}

static int set_bredr_sync(struct hci_dev *hdev, void *data)
{
	int status;

	status = hci_write_fast_connectable_sync(hdev, false);

	if (!status)
		status = hci_update_scan_sync(hdev);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	if (!status)
		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);

	return status;
}

static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When a dual-mode controller has been configured to operate
		 * with LE only using a static address, switching BR/EDR back
		 * on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as their identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restriction applies when Secure Connections has
		 * been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when Secure Connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
}

static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Force write of val */
	hci_dev_set_flag(hdev, HCI_SC_ENABLED);

	return hci_write_sc_support_sync(hdev, val);
}

static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

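/* Mode mapping for the handler above: 0x00 discards debug keys, 0x01
 * keeps them (HCI_KEEP_DEBUG_KEYS) and 0x02 additionally puts the
 * controller into SSP debug mode (HCI_USE_DEBUG_KEYS), which is what the
 * HCI_OP_WRITE_SSP_DEBUG_MODE command toggles on a powered, SSP-enabled
 * controller.
 */
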
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static bool irk_is_valid(struct mgmt_irk_info *irk)
{
	switch (irk->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
}

static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

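/* Illustrative arithmetic for the validation above: struct_size() makes
 *
 *	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info)
 *
 * so loading two IRKs requires exactly that many bytes, no padding.
 * Blocked keys are skipped individually rather than failing the whole
 * load, so a single revoked IRK does not invalidate the rest.
 */
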
static bool ltk_is_valid(struct mgmt_ltk_info *key)
{
	if (key->initiator != 0x00 && key->initiator != 0x01)
		return false;

	switch (key->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
}

static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		if (!ltk_is_valid(key)) {
			bt_dev_warn(hdev, "Invalid LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
				NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

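/* The key->type dispatch above, summarised:
 *
 *	MGMT_LTK_UNAUTHENTICATED -> SMP_LTK / SMP_LTK_RESPONDER, unauth
 *	MGMT_LTK_AUTHENTICATED   -> SMP_LTK / SMP_LTK_RESPONDER, auth
 *	MGMT_LTK_P256_UNAUTH     -> SMP_LTK_P256, unauthenticated
 *	MGMT_LTK_P256_AUTH       -> SMP_LTK_P256, authenticated
 *	MGMT_LTK_P256_DEBUG      -> skipped (falls through to default)
 *
 * The initiator flag selects between the central and responder key slot
 * for the two legacy types.
 */
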
static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct mgmt_rp_get_conn_info rp;
	u8 status;

	bt_dev_dbg(hdev, "err %d", err);

	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	status = mgmt_status(err);
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_GET_CONN_INFO, status,
			  &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}

static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16 handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	cmd->user_data = conn;
	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change, thus we don't need to
	 * query for it once the value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}

static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid the client trying to guess when to poll again, calculate
	 * the connection info age as a random value between the min/max set
	 * in hdev.
	 */
	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
						 hdev->conn_info_max_age - 1);

	/* Query the controller to refresh cached values if they are too old
	 * or were never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

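/* Example of the caching policy above, assuming the default
 * conn_info_min_age/conn_info_max_age of 1000/3000 ms have not been
 * changed: a second Get Conn Info within roughly one to three seconds of
 * the last refresh is answered straight from the values cached in
 * hci_conn, and only an older timestamp triggers a new round of HCI
 * RSSI and TX power reads.
 */
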
static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}

static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn;

	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_read_clock_sync(hdev, &hci_cp);

	/* Make sure connection still exists */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	cmd->user_data = conn;
	hci_cp.handle = cpu_to_le16(conn->handle);
	hci_cp.which = 0x01; /* Piconet clock */

	return hci_read_clock_sync(hdev, &hci_cp);
}

static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	if (!conn)
		return false;

	if (conn->dst_type != type)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}

/* This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	hci_pend_le_list_del_init(params);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		else
			hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}

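/* The auto_connect policies set above, summarised:
 *
 *	HCI_AUTO_CONN_DISABLED/LINK_LOSS: on no list, unless an explicit
 *	                                  connect is already pending
 *	HCI_AUTO_CONN_REPORT:             pend_le_reports (pend_le_conns
 *	                                  if an explicit connect is pending)
 *	HCI_AUTO_CONN_DIRECT/ALWAYS:      pend_le_conns while not connected
 */
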
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;
	ev.action = action;

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}

static void add_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_device *cp = cmd->param;

	if (!err) {
		struct hci_conn_params *params;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));

		device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
			     cp->action);
		device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
				     cp->addr.type, hdev->conn_flags,
				     params ? params->flags : 0);
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
			  mgmt_status(err), &cp->addr, sizeof(cp->addr));
	mgmt_pending_free(cmd);
}

static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}

static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
				 add_device_complete);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		mgmt_pending_free(cmd);
	}

	goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}

7627 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7628 bdaddr_t *bdaddr, u8 type)
7629 {
7630 struct mgmt_ev_device_removed ev;
7631
7632 bacpy(&ev.addr.bdaddr, bdaddr);
7633 ev.addr.type = type;
7634
7635 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7636 }
7637
7638 static int remove_device_sync(struct hci_dev *hdev, void *data)
7639 {
7640 return hci_update_passive_scan_sync(hdev);
7641 }
7642
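/* Remove Device handler. A non-zero address removes a single entry
 * (accept list for BR/EDR, conn_params for LE); BDADDR_ANY with
 * address type 0 clears the accept list and all enabled LE connection
 * parameters in one go.
 */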
7643 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7644 void *data, u16 len)
7645 {
7646 struct mgmt_cp_remove_device *cp = data;
7647 int err;
7648
7649 bt_dev_dbg(hdev, "sock %p", sk);
7650
7651 hci_dev_lock(hdev);
7652
7653 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7654 struct hci_conn_params *params;
7655 u8 addr_type;
7656
7657 if (!bdaddr_type_is_valid(cp->addr.type)) {
7658 err = mgmt_cmd_complete(sk, hdev->id,
7659 MGMT_OP_REMOVE_DEVICE,
7660 MGMT_STATUS_INVALID_PARAMS,
7661 &cp->addr, sizeof(cp->addr));
7662 goto unlock;
7663 }
7664
7665 if (cp->addr.type == BDADDR_BREDR) {
7666 err = hci_bdaddr_list_del(&hdev->accept_list,
7667 &cp->addr.bdaddr,
7668 cp->addr.type);
7669 if (err) {
7670 err = mgmt_cmd_complete(sk, hdev->id,
7671 MGMT_OP_REMOVE_DEVICE,
7672 MGMT_STATUS_INVALID_PARAMS,
7673 &cp->addr,
7674 sizeof(cp->addr));
7675 goto unlock;
7676 }
7677
7678 hci_update_scan(hdev);
7679
7680 device_removed(sk, hdev, &cp->addr.bdaddr,
7681 cp->addr.type);
7682 goto complete;
7683 }
7684
7685 addr_type = le_addr_type(cp->addr.type);
7686
7687 /* Kernel internally uses conn_params with resolvable private
7688 * address, but Remove Device allows only identity addresses.
7689 * Make sure it is enforced before calling
7690 * hci_conn_params_lookup.
7691 */
7692 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7693 err = mgmt_cmd_complete(sk, hdev->id,
7694 MGMT_OP_REMOVE_DEVICE,
7695 MGMT_STATUS_INVALID_PARAMS,
7696 &cp->addr, sizeof(cp->addr));
7697 goto unlock;
7698 }
7699
7700 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7701 addr_type);
7702 if (!params) {
7703 err = mgmt_cmd_complete(sk, hdev->id,
7704 MGMT_OP_REMOVE_DEVICE,
7705 MGMT_STATUS_INVALID_PARAMS,
7706 &cp->addr, sizeof(cp->addr));
7707 goto unlock;
7708 }
7709
7710 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7711 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7712 err = mgmt_cmd_complete(sk, hdev->id,
7713 MGMT_OP_REMOVE_DEVICE,
7714 MGMT_STATUS_INVALID_PARAMS,
7715 &cp->addr, sizeof(cp->addr));
7716 goto unlock;
7717 }
7718
7719 hci_conn_params_free(params);
7720
7721 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7722 } else {
7723 struct hci_conn_params *p, *tmp;
7724 struct bdaddr_list *b, *btmp;
7725
7726 if (cp->addr.type) {
7727 err = mgmt_cmd_complete(sk, hdev->id,
7728 MGMT_OP_REMOVE_DEVICE,
7729 MGMT_STATUS_INVALID_PARAMS,
7730 &cp->addr, sizeof(cp->addr));
7731 goto unlock;
7732 }
7733
7734 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7735 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7736 list_del(&b->list);
7737 kfree(b);
7738 }
7739
7740 hci_update_scan(hdev);
7741
7742 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7743 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7744 continue;
7745 device_removed(sk, hdev, &p->addr, p->addr_type);
7746 if (p->explicit_connect) {
7747 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7748 continue;
7749 }
7750 hci_conn_params_free(p);
7751 }
7752
7753 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7754 }
7755
7756 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7757
7758 complete:
7759 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7760 MGMT_STATUS_SUCCESS, &cp->addr,
7761 sizeof(cp->addr));
7762 unlock:
7763 hci_dev_unlock(hdev);
7764 return err;
7765 }
7766
7767 static int conn_update_sync(struct hci_dev *hdev, void *data)
7768 {
7769 struct hci_conn_params *params = data;
7770 struct hci_conn *conn;
7771
7772 conn = hci_conn_hash_lookup_le(hdev, &params->addr, params->addr_type);
7773 if (!conn)
7774 return -ECANCELED;
7775
7776 return hci_le_conn_update_sync(hdev, conn, params);
7777 }
7778
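/* Load Connection Parameters handler. Userspace loads its stored LE
 * connection parameters in bulk; as a special case, a single-entry
 * load for an already known device triggers the connection update
 * procedure on a live central connection (see conn_update_sync above).
 */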
7779 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7780 u16 len)
7781 {
7782 struct mgmt_cp_load_conn_param *cp = data;
7783 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7784 sizeof(struct mgmt_conn_param));
7785 u16 param_count, expected_len;
7786 int i;
7787
7788 if (!lmp_le_capable(hdev))
7789 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7790 MGMT_STATUS_NOT_SUPPORTED);
7791
7792 param_count = __le16_to_cpu(cp->param_count);
7793 if (param_count > max_param_count) {
7794 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7795 param_count);
7796 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7797 MGMT_STATUS_INVALID_PARAMS);
7798 }
7799
7800 expected_len = struct_size(cp, params, param_count);
7801 if (expected_len != len) {
7802 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7803 expected_len, len);
7804 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7805 MGMT_STATUS_INVALID_PARAMS);
7806 }
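	/* Example: with param_count == 2 the request must be exactly
	 * sizeof(*cp) + 2 * sizeof(struct mgmt_conn_param) bytes long;
	 * struct_size() computes this with overflow checking.
	 */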
7807
7808 bt_dev_dbg(hdev, "param_count %u", param_count);
7809
7810 hci_dev_lock(hdev);
7811
7812 if (param_count > 1)
7813 hci_conn_params_clear_disabled(hdev);
7814
7815 for (i = 0; i < param_count; i++) {
7816 struct mgmt_conn_param *param = &cp->params[i];
7817 struct hci_conn_params *hci_param;
7818 u16 min, max, latency, timeout;
7819 bool update = false;
7820 u8 addr_type;
7821
7822 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
7823 param->addr.type);
7824
7825 if (param->addr.type == BDADDR_LE_PUBLIC) {
7826 addr_type = ADDR_LE_DEV_PUBLIC;
7827 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7828 addr_type = ADDR_LE_DEV_RANDOM;
7829 } else {
7830 bt_dev_err(hdev, "ignoring invalid connection parameters");
7831 continue;
7832 }
7833
7834 min = le16_to_cpu(param->min_interval);
7835 max = le16_to_cpu(param->max_interval);
7836 latency = le16_to_cpu(param->latency);
7837 timeout = le16_to_cpu(param->timeout);
7838
7839 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7840 min, max, latency, timeout);
7841
7842 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7843 bt_dev_err(hdev, "ignoring invalid connection parameters");
7844 continue;
7845 }
7846
7847 /* Detect when the load is for an existing parameter, then
7848 * attempt to trigger the connection update procedure.
7849 */
7850 if (!i && param_count == 1) {
7851 hci_param = hci_conn_params_lookup(hdev,
7852 &param->addr.bdaddr,
7853 addr_type);
7854 if (hci_param)
7855 update = true;
7856 else
7857 hci_conn_params_clear_disabled(hdev);
7858 }
7859
7860 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7861 addr_type);
7862 if (!hci_param) {
7863 bt_dev_err(hdev, "failed to add connection parameters");
7864 continue;
7865 }
7866
7867 hci_param->conn_min_interval = min;
7868 hci_param->conn_max_interval = max;
7869 hci_param->conn_latency = latency;
7870 hci_param->supervision_timeout = timeout;
7871
7872 /* Check if we need to trigger a connection update */
7873 if (update) {
7874 struct hci_conn *conn;
7875
7876 /* Look up an existing connection as central and check
7877 * whether the parameters match; if they don't, trigger
7878 * a connection update.
7879 */
7880 conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
7881 addr_type);
7882 if (conn && conn->role == HCI_ROLE_MASTER &&
7883 (conn->le_conn_min_interval != min ||
7884 conn->le_conn_max_interval != max ||
7885 conn->le_conn_latency != latency ||
7886 conn->le_supv_timeout != timeout))
7887 hci_cmd_sync_queue(hdev, conn_update_sync,
7888 hci_param, NULL);
7889 }
7890 }
7891
7892 hci_dev_unlock(hdev);
7893
7894 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
7895 NULL, 0);
7896 }
7897
7898 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7899 void *data, u16 len)
7900 {
7901 struct mgmt_cp_set_external_config *cp = data;
7902 bool changed;
7903 int err;
7904
7905 bt_dev_dbg(hdev, "sock %p", sk);
7906
7907 if (hdev_is_powered(hdev))
7908 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7909 MGMT_STATUS_REJECTED);
7910
7911 if (cp->config != 0x00 && cp->config != 0x01)
7912 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7913 MGMT_STATUS_INVALID_PARAMS);
7914
7915 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7916 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7917 MGMT_STATUS_NOT_SUPPORTED);
7918
7919 hci_dev_lock(hdev);
7920
7921 if (cp->config)
7922 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7923 else
7924 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7925
7926 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7927 if (err < 0)
7928 goto unlock;
7929
7930 if (!changed)
7931 goto unlock;
7932
7933 err = new_options(hdev, sk);
7934
7935 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7936 mgmt_index_removed(hdev);
7937
7938 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7939 hci_dev_set_flag(hdev, HCI_CONFIG);
7940 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7941
7942 queue_work(hdev->req_workqueue, &hdev->power_on);
7943 } else {
7944 set_bit(HCI_RAW, &hdev->flags);
7945 mgmt_index_added(hdev);
7946 }
7947 }
7948
7949 unlock:
7950 hci_dev_unlock(hdev);
7951 return err;
7952 }
7953
7954 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
7955 void *data, u16 len)
7956 {
7957 struct mgmt_cp_set_public_address *cp = data;
7958 bool changed;
7959 int err;
7960
7961 bt_dev_dbg(hdev, "sock %p", sk);
7962
7963 if (hdev_is_powered(hdev))
7964 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7965 MGMT_STATUS_REJECTED);
7966
7967 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
7968 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7969 MGMT_STATUS_INVALID_PARAMS);
7970
7971 if (!hdev->set_bdaddr)
7972 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7973 MGMT_STATUS_NOT_SUPPORTED);
7974
7975 hci_dev_lock(hdev);
7976
7977 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
7978 bacpy(&hdev->public_addr, &cp->bdaddr);
7979
7980 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
7981 if (err < 0)
7982 goto unlock;
7983
7984 if (!changed)
7985 goto unlock;
7986
7987 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
7988 err = new_options(hdev, sk);
7989
7990 if (is_configured(hdev)) {
7991 mgmt_index_removed(hdev);
7992
7993 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
7994
7995 hci_dev_set_flag(hdev, HCI_CONFIG);
7996 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7997
7998 queue_work(hdev->req_workqueue, &hdev->power_on);
7999 }
8000
8001 unlock:
8002 hci_dev_unlock(hdev);
8003 return err;
8004 }
8005
8006 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8007 int err)
8008 {
8009 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8010 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8011 u8 *h192, *r192, *h256, *r256;
8012 struct mgmt_pending_cmd *cmd = data;
8013 struct sk_buff *skb = cmd->skb;
8014 u8 status = mgmt_status(err);
8015 u16 eir_len;
8016
8017 if (err == -ECANCELED ||
8018 cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
8019 return;
8020
8021 if (!status) {
8022 if (!skb)
8023 status = MGMT_STATUS_FAILED;
8024 else if (IS_ERR(skb))
8025 status = mgmt_status(PTR_ERR(skb));
8026 else
8027 status = mgmt_status(skb->data[0]);
8028 }
8029
8030 bt_dev_dbg(hdev, "status %u", status);
8031
8032 mgmt_cp = cmd->param;
8033
8034 if (status) {
8035 status = mgmt_status(status);
8036 eir_len = 0;
8037
8038 h192 = NULL;
8039 r192 = NULL;
8040 h256 = NULL;
8041 r256 = NULL;
8042 } else if (!bredr_sc_enabled(hdev)) {
8043 struct hci_rp_read_local_oob_data *rp;
8044
8045 if (skb->len != sizeof(*rp)) {
8046 status = MGMT_STATUS_FAILED;
8047 eir_len = 0;
8048 } else {
8049 status = MGMT_STATUS_SUCCESS;
8050 rp = (void *)skb->data;
8051
8052 eir_len = 5 + 18 + 18;
8053 h192 = rp->hash;
8054 r192 = rp->rand;
8055 h256 = NULL;
8056 r256 = NULL;
8057 }
8058 } else {
8059 struct hci_rp_read_local_oob_ext_data *rp;
8060
8061 if (skb->len != sizeof(*rp)) {
8062 status = MGMT_STATUS_FAILED;
8063 eir_len = 0;
8064 } else {
8065 status = MGMT_STATUS_SUCCESS;
8066 rp = (void *)skb->data;
8067
8068 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8069 eir_len = 5 + 18 + 18;
8070 h192 = NULL;
8071 r192 = NULL;
8072 } else {
8073 eir_len = 5 + 18 + 18 + 18 + 18;
8074 h192 = rp->hash192;
8075 r192 = rp->rand192;
8076 }
8077
8078 h256 = rp->hash256;
8079 r256 = rp->rand256;
8080 }
8081 }
8082
8083 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8084 if (!mgmt_rp)
8085 goto done;
8086
8087 if (eir_len == 0)
8088 goto send_rsp;
8089
8090 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8091 hdev->dev_class, 3);
8092
8093 if (h192 && r192) {
8094 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8095 EIR_SSP_HASH_C192, h192, 16);
8096 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8097 EIR_SSP_RAND_R192, r192, 16);
8098 }
8099
8100 if (h256 && r256) {
8101 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8102 EIR_SSP_HASH_C256, h256, 16);
8103 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8104 EIR_SSP_RAND_R256, r256, 16);
8105 }
8106
8107 send_rsp:
8108 mgmt_rp->type = mgmt_cp->type;
8109 mgmt_rp->eir_len = cpu_to_le16(eir_len);
8110
8111 err = mgmt_cmd_complete(cmd->sk, hdev->id,
8112 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8113 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8114 if (err < 0 || status)
8115 goto done;
8116
8117 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8118
8119 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8120 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8121 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8122 done:
8123 if (skb && !IS_ERR(skb))
8124 kfree_skb(skb);
8125
8126 kfree(mgmt_rp);
8127 mgmt_pending_remove(cmd);
8128 }
8129
8130 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8131 struct mgmt_cp_read_local_oob_ext_data *cp)
8132 {
8133 struct mgmt_pending_cmd *cmd;
8134 int err;
8135
8136 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8137 cp, sizeof(*cp));
8138 if (!cmd)
8139 return -ENOMEM;
8140
8141 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8142 read_local_oob_ext_data_complete);
8143
8144 if (err < 0) {
8145 mgmt_pending_remove(cmd);
8146 return err;
8147 }
8148
8149 return 0;
8150 }
8151
8152 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8153 void *data, u16 data_len)
8154 {
8155 struct mgmt_cp_read_local_oob_ext_data *cp = data;
8156 struct mgmt_rp_read_local_oob_ext_data *rp;
8157 size_t rp_len;
8158 u16 eir_len;
8159 u8 status, flags, role, addr[7], hash[16], rand[16];
8160 int err;
8161
8162 bt_dev_dbg(hdev, "sock %p", sk);
8163
8164 if (hdev_is_powered(hdev)) {
8165 switch (cp->type) {
8166 case BIT(BDADDR_BREDR):
8167 status = mgmt_bredr_support(hdev);
8168 if (status)
8169 eir_len = 0;
8170 else
8171 eir_len = 5;
8172 break;
8173 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8174 status = mgmt_le_support(hdev);
8175 if (status)
8176 eir_len = 0;
8177 else
8178 eir_len = 9 + 3 + 18 + 18 + 3;
8179 break;
8180 default:
8181 status = MGMT_STATUS_INVALID_PARAMS;
8182 eir_len = 0;
8183 break;
8184 }
8185 } else {
8186 status = MGMT_STATUS_NOT_POWERED;
8187 eir_len = 0;
8188 }
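	/* The eir_len values above count length+type prefixed fields:
	 * 5 = Class of Device (2 + 3 bytes), 18 = one 16-byte SSP hash
	 * or randomizer (2 + 16), 9 = LE address plus address type
	 * byte (2 + 7), 3 = role or flags (2 + 1).
	 */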
8189
8190 rp_len = sizeof(*rp) + eir_len;
8191 rp = kmalloc(rp_len, GFP_ATOMIC);
8192 if (!rp)
8193 return -ENOMEM;
8194
8195 if (!status && !lmp_ssp_capable(hdev)) {
8196 status = MGMT_STATUS_NOT_SUPPORTED;
8197 eir_len = 0;
8198 }
8199
8200 if (status)
8201 goto complete;
8202
8203 hci_dev_lock(hdev);
8204
8205 eir_len = 0;
8206 switch (cp->type) {
8207 case BIT(BDADDR_BREDR):
8208 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8209 err = read_local_ssp_oob_req(hdev, sk, cp);
8210 hci_dev_unlock(hdev);
8211 if (!err)
8212 goto done;
8213
8214 status = MGMT_STATUS_FAILED;
8215 goto complete;
8216 } else {
8217 eir_len = eir_append_data(rp->eir, eir_len,
8218 EIR_CLASS_OF_DEV,
8219 hdev->dev_class, 3);
8220 }
8221 break;
8222 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8223 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8224 smp_generate_oob(hdev, hash, rand) < 0) {
8225 hci_dev_unlock(hdev);
8226 status = MGMT_STATUS_FAILED;
8227 goto complete;
8228 }
8229
8230 /* This should return the active RPA, but since the RPA
8231 * is only programmed on demand, it is really hard to fill
8232 * this in at the moment. For now disallow retrieving
8233 * local out-of-band data when privacy is in use.
8234 *
8235 * Returning the identity address will not help here since
8236 * pairing happens before the identity resolving key is
8237 * known and thus the connection establishment happens
8238 * based on the RPA and not the identity address.
8239 */
8240 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8241 hci_dev_unlock(hdev);
8242 status = MGMT_STATUS_REJECTED;
8243 goto complete;
8244 }
8245
8246 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8247 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8248 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8249 bacmp(&hdev->static_addr, BDADDR_ANY))) {
8250 memcpy(addr, &hdev->static_addr, 6);
8251 addr[6] = 0x01;
8252 } else {
8253 memcpy(addr, &hdev->bdaddr, 6);
8254 addr[6] = 0x00;
8255 }
8256
8257 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8258 addr, sizeof(addr));
8259
8260 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8261 role = 0x02;
8262 else
8263 role = 0x01;
8264
8265 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8266 &role, sizeof(role));
8267
8268 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8269 eir_len = eir_append_data(rp->eir, eir_len,
8270 EIR_LE_SC_CONFIRM,
8271 hash, sizeof(hash));
8272
8273 eir_len = eir_append_data(rp->eir, eir_len,
8274 EIR_LE_SC_RANDOM,
8275 rand, sizeof(rand));
8276 }
8277
8278 flags = mgmt_get_adv_discov_flags(hdev);
8279
8280 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8281 flags |= LE_AD_NO_BREDR;
8282
8283 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8284 &flags, sizeof(flags));
8285 break;
8286 }
8287
8288 hci_dev_unlock(hdev);
8289
8290 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8291
8292 status = MGMT_STATUS_SUCCESS;
8293
8294 complete:
8295 rp->type = cp->type;
8296 rp->eir_len = cpu_to_le16(eir_len);
8297
8298 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8299 status, rp, sizeof(*rp) + eir_len);
8300 if (err < 0 || status)
8301 goto done;
8302
8303 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8304 rp, sizeof(*rp) + eir_len,
8305 HCI_MGMT_OOB_DATA_EVENTS, sk);
8306
8307 done:
8308 kfree(rp);
8309
8310 return err;
8311 }
8312
8313 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8314 {
8315 u32 flags = 0;
8316
8317 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8318 flags |= MGMT_ADV_FLAG_DISCOV;
8319 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8320 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8321 flags |= MGMT_ADV_FLAG_APPEARANCE;
8322 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8323 flags |= MGMT_ADV_PARAM_DURATION;
8324 flags |= MGMT_ADV_PARAM_TIMEOUT;
8325 flags |= MGMT_ADV_PARAM_INTERVALS;
8326 flags |= MGMT_ADV_PARAM_TX_POWER;
8327 flags |= MGMT_ADV_PARAM_SCAN_RSP;
8328
8329 /* In extended advertising, the TX_POWER returned from Set Adv Param
8330 * will always be valid.
8331 */
8332 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8333 flags |= MGMT_ADV_FLAG_TX_POWER;
8334
8335 if (ext_adv_capable(hdev)) {
8336 flags |= MGMT_ADV_FLAG_SEC_1M;
8337 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8338 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8339
8340 if (le_2m_capable(hdev))
8341 flags |= MGMT_ADV_FLAG_SEC_2M;
8342
8343 if (le_coded_capable(hdev))
8344 flags |= MGMT_ADV_FLAG_SEC_CODED;
8345 }
8346
8347 return flags;
8348 }
8349
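/* Read Advertising Features handler. The reply carries the supported
 * flag bits from get_supported_adv_flags() plus the maximum data
 * lengths and the list of currently registered instance identifiers.
 */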
8350 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8351 void *data, u16 data_len)
8352 {
8353 struct mgmt_rp_read_adv_features *rp;
8354 size_t rp_len;
8355 int err;
8356 struct adv_info *adv_instance;
8357 u32 supported_flags;
8358 u8 *instance;
8359
8360 bt_dev_dbg(hdev, "sock %p", sk);
8361
8362 if (!lmp_le_capable(hdev))
8363 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8364 MGMT_STATUS_REJECTED);
8365
8366 hci_dev_lock(hdev);
8367
8368 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8369 rp = kmalloc(rp_len, GFP_ATOMIC);
8370 if (!rp) {
8371 hci_dev_unlock(hdev);
8372 return -ENOMEM;
8373 }
8374
8375 supported_flags = get_supported_adv_flags(hdev);
8376
8377 rp->supported_flags = cpu_to_le32(supported_flags);
8378 rp->max_adv_data_len = max_adv_len(hdev);
8379 rp->max_scan_rsp_len = max_adv_len(hdev);
8380 rp->max_instances = hdev->le_num_of_adv_sets;
8381 rp->num_instances = hdev->adv_instance_cnt;
8382
8383 instance = rp->instance;
8384 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8385 /* Only instances 1-le_num_of_adv_sets are externally visible */
8386 if (adv_instance->instance <= hdev->adv_instance_cnt) {
8387 *instance = adv_instance->instance;
8388 instance++;
8389 } else {
8390 rp->num_instances--;
8391 rp_len--;
8392 }
8393 }
8394
8395 hci_dev_unlock(hdev);
8396
8397 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8398 MGMT_STATUS_SUCCESS, rp, rp_len);
8399
8400 kfree(rp);
8401
8402 return err;
8403 }
8404
8405 static u8 calculate_name_len(struct hci_dev *hdev)
8406 {
8407 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8408
8409 return eir_append_local_name(hdev, buf, 0);
8410 }
8411
8412 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8413 bool is_adv_data)
8414 {
8415 u8 max_len = max_adv_len(hdev);
8416
8417 if (is_adv_data) {
8418 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8419 MGMT_ADV_FLAG_LIMITED_DISCOV |
8420 MGMT_ADV_FLAG_MANAGED_FLAGS))
8421 max_len -= 3;
8422
8423 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8424 max_len -= 3;
8425 } else {
8426 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8427 max_len -= calculate_name_len(hdev);
8428
8429 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8430 max_len -= 4;
8431 }
8432
8433 return max_len;
8434 }
8435
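/* The *_managed() helpers below report whether a given EIR field is
 * generated by the kernel for the requested flags; tlv_data_is_valid()
 * uses them to reject user-supplied data that would duplicate such a
 * field.
 */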
8436 static bool flags_managed(u32 adv_flags)
8437 {
8438 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8439 MGMT_ADV_FLAG_LIMITED_DISCOV |
8440 MGMT_ADV_FLAG_MANAGED_FLAGS);
8441 }
8442
8443 static bool tx_power_managed(u32 adv_flags)
8444 {
8445 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8446 }
8447
8448 static bool name_managed(u32 adv_flags)
8449 {
8450 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8451 }
8452
8453 static bool appearance_managed(u32 adv_flags)
8454 {
8455 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8456 }
8457
8458 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8459 u8 len, bool is_adv_data)
8460 {
8461 int i, cur_len;
8462 u8 max_len;
8463
8464 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8465
8466 if (len > max_len)
8467 return false;
8468
8469 /* Make sure that the data is correctly formatted. */
8470 for (i = 0; i < len; i += (cur_len + 1)) {
8471 cur_len = data[i];
8472
8473 if (!cur_len)
8474 continue;
8475
8476 if (data[i + 1] == EIR_FLAGS &&
8477 (!is_adv_data || flags_managed(adv_flags)))
8478 return false;
8479
8480 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8481 return false;
8482
8483 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8484 return false;
8485
8486 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8487 return false;
8488
8489 if (data[i + 1] == EIR_APPEARANCE &&
8490 appearance_managed(adv_flags))
8491 return false;
8492
8493 /* If the current field length would exceed the total data
8494 * length, then it's invalid.
8495 */
8496 if (i + cur_len >= len)
8497 return false;
8498 }
8499
8500 return true;
8501 }
8502
8503 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8504 {
8505 u32 supported_flags, phy_flags;
8506
8507 /* Only a subset of the specified flags is supported; sec flags are
8508 * also mutually exclusive (phy_flags & -phy_flags keeps the lowest set
8509 * bit, so the XOR below is non-zero iff more than one flag is set). */
8510 supported_flags = get_supported_adv_flags(hdev);
8511 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8512 if (adv_flags & ~supported_flags ||
8513 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8514 return false;
8515
8516 return true;
8517 }
8518
8519 static bool adv_busy(struct hci_dev *hdev)
8520 {
8521 return pending_find(MGMT_OP_SET_LE, hdev);
8522 }
8523
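/* Roll-up helper run when an Add Advertising operation finishes: on
 * success pending instances are committed, on failure they are removed
 * again and an Advertising Removed event is emitted.
 */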
8524 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8525 int err)
8526 {
8527 struct adv_info *adv, *n;
8528
8529 bt_dev_dbg(hdev, "err %d", err);
8530
8531 hci_dev_lock(hdev);
8532
8533 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8534 u8 instance;
8535
8536 if (!adv->pending)
8537 continue;
8538
8539 if (!err) {
8540 adv->pending = false;
8541 continue;
8542 }
8543
8544 instance = adv->instance;
8545
8546 if (hdev->cur_adv_instance == instance)
8547 cancel_adv_timeout(hdev);
8548
8549 hci_remove_adv_instance(hdev, instance);
8550 mgmt_advertising_removed(sk, hdev, instance);
8551 }
8552
8553 hci_dev_unlock(hdev);
8554 }
8555
8556 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8557 {
8558 struct mgmt_pending_cmd *cmd = data;
8559 struct mgmt_cp_add_advertising *cp = cmd->param;
8560 struct mgmt_rp_add_advertising rp;
8561
8562 memset(&rp, 0, sizeof(rp));
8563
8564 rp.instance = cp->instance;
8565
8566 if (err)
8567 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
8568 mgmt_status(err));
8569 else
8570 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
8571 mgmt_status(err), &rp, sizeof(rp));
8572
8573 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8574
8575 mgmt_pending_free(cmd);
8576 }
8577
8578 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8579 {
8580 struct mgmt_pending_cmd *cmd = data;
8581 struct mgmt_cp_add_advertising *cp = cmd->param;
8582
8583 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8584 }
8585
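/* Add Advertising handler: validate the requested flags and TLV data,
 * register (or replace) the instance and, when HCI traffic is needed,
 * schedule it through hci_cmd_sync_queue() so the request completes
 * asynchronously in add_advertising_complete().
 */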
8586 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8587 void *data, u16 data_len)
8588 {
8589 struct mgmt_cp_add_advertising *cp = data;
8590 struct mgmt_rp_add_advertising rp;
8591 u32 flags;
8592 u8 status;
8593 u16 timeout, duration;
8594 unsigned int prev_instance_cnt;
8595 u8 schedule_instance = 0;
8596 struct adv_info *adv, *next_instance;
8597 int err;
8598 struct mgmt_pending_cmd *cmd;
8599
8600 bt_dev_dbg(hdev, "sock %p", sk);
8601
8602 status = mgmt_le_support(hdev);
8603 if (status)
8604 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8605 status);
8606
8607 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8608 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8609 MGMT_STATUS_INVALID_PARAMS);
8610
8611 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8612 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8613 MGMT_STATUS_INVALID_PARAMS);
8614
8615 flags = __le32_to_cpu(cp->flags);
8616 timeout = __le16_to_cpu(cp->timeout);
8617 duration = __le16_to_cpu(cp->duration);
8618
8619 if (!requested_adv_flags_are_valid(hdev, flags))
8620 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8621 MGMT_STATUS_INVALID_PARAMS);
8622
8623 hci_dev_lock(hdev);
8624
8625 if (timeout && !hdev_is_powered(hdev)) {
8626 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8627 MGMT_STATUS_REJECTED);
8628 goto unlock;
8629 }
8630
8631 if (adv_busy(hdev)) {
8632 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8633 MGMT_STATUS_BUSY);
8634 goto unlock;
8635 }
8636
8637 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8638 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8639 cp->scan_rsp_len, false)) {
8640 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8641 MGMT_STATUS_INVALID_PARAMS);
8642 goto unlock;
8643 }
8644
8645 prev_instance_cnt = hdev->adv_instance_cnt;
8646
8647 adv = hci_add_adv_instance(hdev, cp->instance, flags,
8648 cp->adv_data_len, cp->data,
8649 cp->scan_rsp_len,
8650 cp->data + cp->adv_data_len,
8651 timeout, duration,
8652 HCI_ADV_TX_POWER_NO_PREFERENCE,
8653 hdev->le_adv_min_interval,
8654 hdev->le_adv_max_interval, 0);
8655 if (IS_ERR(adv)) {
8656 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8657 MGMT_STATUS_FAILED);
8658 goto unlock;
8659 }
8660
8661 /* Only trigger an advertising added event if a new instance was
8662 * actually added.
8663 */
8664 if (hdev->adv_instance_cnt > prev_instance_cnt)
8665 mgmt_advertising_added(sk, hdev, cp->instance);
8666
8667 if (hdev->cur_adv_instance == cp->instance) {
8668 /* If the currently advertised instance is being changed then
8669 * cancel the current advertising and schedule the next
8670 * instance. If there is only one instance then the overridden
8671 * advertising data will be visible right away.
8672 */
8673 cancel_adv_timeout(hdev);
8674
8675 next_instance = hci_get_next_instance(hdev, cp->instance);
8676 if (next_instance)
8677 schedule_instance = next_instance->instance;
8678 } else if (!hdev->adv_instance_timeout) {
8679 /* Immediately advertise the new instance if no other
8680 * instance is currently being advertised.
8681 */
8682 schedule_instance = cp->instance;
8683 }
8684
8685 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8686 * there is no instance to be advertised then we have no HCI
8687 * communication to make. Simply return.
8688 */
8689 if (!hdev_is_powered(hdev) ||
8690 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8691 !schedule_instance) {
8692 rp.instance = cp->instance;
8693 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8694 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8695 goto unlock;
8696 }
8697
8698 /* We're good to go, update advertising data, parameters, and start
8699 * advertising.
8700 */
8701 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8702 data_len);
8703 if (!cmd) {
8704 err = -ENOMEM;
8705 goto unlock;
8706 }
8707
8708 cp->instance = schedule_instance;
8709
8710 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8711 add_advertising_complete);
8712 if (err < 0)
8713 mgmt_pending_free(cmd);
8714
8715 unlock:
8716 hci_dev_unlock(hdev);
8717
8718 return err;
8719 }
8720
8721 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8722 int err)
8723 {
8724 struct mgmt_pending_cmd *cmd = data;
8725 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8726 struct mgmt_rp_add_ext_adv_params rp;
8727 struct adv_info *adv;
8728 u32 flags;
8729
8730 BT_DBG("%s", hdev->name);
8731
8732 hci_dev_lock(hdev);
8733
8734 adv = hci_find_adv_instance(hdev, cp->instance);
8735 if (!adv)
8736 goto unlock;
8737
8738 rp.instance = cp->instance;
8739 rp.tx_power = adv->tx_power;
8740
8741 /* While we're at it, inform userspace of the available space for this
8742 * advertisement, given the flags that will be used.
8743 */
8744 flags = __le32_to_cpu(cp->flags);
8745 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8746 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8747
8748 if (err) {
8749 /* If this advertisement was previously advertising and we
8750 * failed to update it, we signal that it has been removed and
8751 * delete its structure.
8752 */
8753 if (!adv->pending)
8754 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8755
8756 hci_remove_adv_instance(hdev, cp->instance);
8757
8758 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
8759 mgmt_status(err));
8760 } else {
8761 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
8762 mgmt_status(err), &rp, sizeof(rp));
8763 }
8764
8765 unlock:
8766 mgmt_pending_free(cmd);
8767
8768 hci_dev_unlock(hdev);
8769 }
8770
8771 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8772 {
8773 struct mgmt_pending_cmd *cmd = data;
8774 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8775
8776 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8777 }
8778
8779 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8780 void *data, u16 data_len)
8781 {
8782 struct mgmt_cp_add_ext_adv_params *cp = data;
8783 struct mgmt_rp_add_ext_adv_params rp;
8784 struct mgmt_pending_cmd *cmd = NULL;
8785 struct adv_info *adv;
8786 u32 flags, min_interval, max_interval;
8787 u16 timeout, duration;
8788 u8 status;
8789 s8 tx_power;
8790 int err;
8791
8792 BT_DBG("%s", hdev->name);
8793
8794 status = mgmt_le_support(hdev);
8795 if (status)
8796 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8797 status);
8798
8799 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8800 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8801 MGMT_STATUS_INVALID_PARAMS);
8802
8803 /* The purpose of breaking add_advertising into two separate MGMT calls
8804 * for params and data is to allow more parameters to be added to this
8805 * structure in the future. For this reason, we verify that the request
8806 * contains at least the bare minimum structure known when the interface
8807 * was defined; any extra parameters we don't know about are ignored.
8808 */
8809 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8810 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8811 MGMT_STATUS_INVALID_PARAMS);
8812
8813 flags = __le32_to_cpu(cp->flags);
8814
8815 if (!requested_adv_flags_are_valid(hdev, flags))
8816 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8817 MGMT_STATUS_INVALID_PARAMS);
8818
8819 hci_dev_lock(hdev);
8820
8821 /* In the new interface, the controller must be powered to register */
8822 if (!hdev_is_powered(hdev)) {
8823 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8824 MGMT_STATUS_REJECTED);
8825 goto unlock;
8826 }
8827
8828 if (adv_busy(hdev)) {
8829 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8830 MGMT_STATUS_BUSY);
8831 goto unlock;
8832 }
8833
8834 /* Parse defined parameters from request, use defaults otherwise */
8835 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8836 __le16_to_cpu(cp->timeout) : 0;
8837
8838 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8839 __le16_to_cpu(cp->duration) :
8840 hdev->def_multi_adv_rotation_duration;
8841
8842 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8843 __le32_to_cpu(cp->min_interval) :
8844 hdev->le_adv_min_interval;
8845
8846 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8847 __le32_to_cpu(cp->max_interval) :
8848 hdev->le_adv_max_interval;
8849
8850 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8851 cp->tx_power :
8852 HCI_ADV_TX_POWER_NO_PREFERENCE;
8853
8854 /* Create advertising instance with no advertising or response data */
8855 adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
8856 timeout, duration, tx_power, min_interval,
8857 max_interval, 0);
8858
8859 if (IS_ERR(adv)) {
8860 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8861 MGMT_STATUS_FAILED);
8862 goto unlock;
8863 }
8864
8865 /* Submit request for advertising params if ext adv available */
8866 if (ext_adv_capable(hdev)) {
8867 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8868 data, data_len);
8869 if (!cmd) {
8870 err = -ENOMEM;
8871 hci_remove_adv_instance(hdev, cp->instance);
8872 goto unlock;
8873 }
8874
8875 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8876 add_ext_adv_params_complete);
8877 if (err < 0)
8878 mgmt_pending_free(cmd);
8879 } else {
8880 rp.instance = cp->instance;
8881 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8882 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8883 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8884 err = mgmt_cmd_complete(sk, hdev->id,
8885 MGMT_OP_ADD_EXT_ADV_PARAMS,
8886 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8887 }
8888
8889 unlock:
8890 hci_dev_unlock(hdev);
8891
8892 return err;
8893 }
8894
8895 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8896 {
8897 struct mgmt_pending_cmd *cmd = data;
8898 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8899 struct mgmt_rp_add_advertising rp;
8900
8901 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8902
8903 memset(&rp, 0, sizeof(rp));
8904
8905 rp.instance = cp->instance;
8906
8907 if (err)
8908 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
8909 mgmt_status(err));
8910 else
8911 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
8912 mgmt_status(err), &rp, sizeof(rp));
8913
8914 mgmt_pending_free(cmd);
8915 }
8916
8917 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8918 {
8919 struct mgmt_pending_cmd *cmd = data;
8920 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8921 int err;
8922
8923 if (ext_adv_capable(hdev)) {
8924 err = hci_update_adv_data_sync(hdev, cp->instance);
8925 if (err)
8926 return err;
8927
8928 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8929 if (err)
8930 return err;
8931
8932 return hci_enable_ext_advertising_sync(hdev, cp->instance);
8933 }
8934
8935 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8936 }
8937
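/* Add Extended Advertiser Data handler: the second half of the
 * two-call interface started by add_ext_adv_params(). It attaches the
 * advertising and scan response data to the instance and starts it,
 * using the extended advertising commands when the controller is
 * capable (see add_ext_adv_data_sync() above).
 */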
8938 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
8939 u16 data_len)
8940 {
8941 struct mgmt_cp_add_ext_adv_data *cp = data;
8942 struct mgmt_rp_add_ext_adv_data rp;
8943 u8 schedule_instance = 0;
8944 struct adv_info *next_instance;
8945 struct adv_info *adv_instance;
8946 int err = 0;
8947 struct mgmt_pending_cmd *cmd;
8948
8949 BT_DBG("%s", hdev->name);
8950
8951 hci_dev_lock(hdev);
8952
8953 adv_instance = hci_find_adv_instance(hdev, cp->instance);
8954
8955 if (!adv_instance) {
8956 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8957 MGMT_STATUS_INVALID_PARAMS);
8958 goto unlock;
8959 }
8960
8961 /* In the new interface, the controller must be powered to register */
8962 if (!hdev_is_powered(hdev)) {
8963 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8964 MGMT_STATUS_REJECTED);
8965 goto clear_new_instance;
8966 }
8967
8968 if (adv_busy(hdev)) {
8969 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8970 MGMT_STATUS_BUSY);
8971 goto clear_new_instance;
8972 }
8973
8974 /* Validate new data */
8975 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
8976 cp->adv_data_len, true) ||
8977 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
8978 cp->adv_data_len, cp->scan_rsp_len, false)) {
8979 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8980 MGMT_STATUS_INVALID_PARAMS);
8981 goto clear_new_instance;
8982 }
8983
8984 /* Set the data in the advertising instance */
8985 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
8986 cp->data, cp->scan_rsp_len,
8987 cp->data + cp->adv_data_len);
8988
8989 /* If using software rotation, determine next instance to use */
8990 if (hdev->cur_adv_instance == cp->instance) {
8991 /* If the currently advertised instance is being changed
8992 * then cancel the current advertising and schedule the
8993 * next instance. If there is only one instance then the
8994 * overridden advertising data will be visible right
8995 * away.
8996 */
8997 cancel_adv_timeout(hdev);
8998
8999 next_instance = hci_get_next_instance(hdev, cp->instance);
9000 if (next_instance)
9001 schedule_instance = next_instance->instance;
9002 } else if (!hdev->adv_instance_timeout) {
9003 /* Immediately advertise the new instance if no other
9004 * instance is currently being advertised.
9005 */
9006 schedule_instance = cp->instance;
9007 }
9008
9009 /* If the HCI_ADVERTISING flag is set or there is no instance to
9010 * be advertised then we have no HCI communication to make.
9011 * Simply return.
9012 */
9013 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
9014 if (adv_instance->pending) {
9015 mgmt_advertising_added(sk, hdev, cp->instance);
9016 adv_instance->pending = false;
9017 }
9018 rp.instance = cp->instance;
9019 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9020 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9021 goto unlock;
9022 }
9023
9024 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9025 data_len);
9026 if (!cmd) {
9027 err = -ENOMEM;
9028 goto clear_new_instance;
9029 }
9030
9031 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
9032 add_ext_adv_data_complete);
9033 if (err < 0) {
9034 mgmt_pending_free(cmd);
9035 goto clear_new_instance;
9036 }
9037
9038 /* We were successful in updating data, so trigger advertising_added
9039 * event if this is an instance that wasn't previously advertising. If
9040 * a failure occurs in the requests we initiated, we will remove the
9041 * instance again in add_advertising_complete
9042 */
9043 if (adv_instance->pending)
9044 mgmt_advertising_added(sk, hdev, cp->instance);
9045
9046 goto unlock;
9047
9048 clear_new_instance:
9049 hci_remove_adv_instance(hdev, cp->instance);
9050
9051 unlock:
9052 hci_dev_unlock(hdev);
9053
9054 return err;
9055 }
9056
9057 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9058 int err)
9059 {
9060 struct mgmt_pending_cmd *cmd = data;
9061 struct mgmt_cp_remove_advertising *cp = cmd->param;
9062 struct mgmt_rp_remove_advertising rp;
9063
9064 bt_dev_dbg(hdev, "err %d", err);
9065
9066 memset(&rp, 0, sizeof(rp));
9067 rp.instance = cp->instance;
9068
9069 if (err)
9070 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
9071 mgmt_status(err));
9072 else
9073 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
9074 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9075
9076 mgmt_pending_free(cmd);
9077 }
9078
9079 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9080 {
9081 struct mgmt_pending_cmd *cmd = data;
9082 struct mgmt_cp_remove_advertising *cp = cmd->param;
9083 int err;
9084
9085 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9086 if (err)
9087 return err;
9088
9089 if (list_empty(&hdev->adv_instances))
9090 err = hci_disable_advertising_sync(hdev);
9091
9092 return err;
9093 }
9094
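/* Remove Advertising handler. Instance 0 removes all registered
 * instances; a non-zero instance must exist. Advertising is disabled
 * entirely once the last instance is gone (see
 * remove_advertising_sync() above).
 */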
9095 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9096 void *data, u16 data_len)
9097 {
9098 struct mgmt_cp_remove_advertising *cp = data;
9099 struct mgmt_pending_cmd *cmd;
9100 int err;
9101
9102 bt_dev_dbg(hdev, "sock %p", sk);
9103
9104 hci_dev_lock(hdev);
9105
9106 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9107 err = mgmt_cmd_status(sk, hdev->id,
9108 MGMT_OP_REMOVE_ADVERTISING,
9109 MGMT_STATUS_INVALID_PARAMS);
9110 goto unlock;
9111 }
9112
9113 if (pending_find(MGMT_OP_SET_LE, hdev)) {
9114 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9115 MGMT_STATUS_BUSY);
9116 goto unlock;
9117 }
9118
9119 if (list_empty(&hdev->adv_instances)) {
9120 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9121 MGMT_STATUS_INVALID_PARAMS);
9122 goto unlock;
9123 }
9124
9125 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9126 data_len);
9127 if (!cmd) {
9128 err = -ENOMEM;
9129 goto unlock;
9130 }
9131
9132 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
9133 remove_advertising_complete);
9134 if (err < 0)
9135 mgmt_pending_free(cmd);
9136
9137 unlock:
9138 hci_dev_unlock(hdev);
9139
9140 return err;
9141 }
9142
9143 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9144 void *data, u16 data_len)
9145 {
9146 struct mgmt_cp_get_adv_size_info *cp = data;
9147 struct mgmt_rp_get_adv_size_info rp;
9148 u32 flags, supported_flags;
9149
9150 bt_dev_dbg(hdev, "sock %p", sk);
9151
9152 if (!lmp_le_capable(hdev))
9153 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9154 MGMT_STATUS_REJECTED);
9155
9156 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9157 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9158 MGMT_STATUS_INVALID_PARAMS);
9159
9160 flags = __le32_to_cpu(cp->flags);
9161
9162 /* The current implementation only supports a subset of the specified
9163 * flags.
9164 */
9165 supported_flags = get_supported_adv_flags(hdev);
9166 if (flags & ~supported_flags)
9167 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9168 MGMT_STATUS_INVALID_PARAMS);
9169
9170 rp.instance = cp->instance;
9171 rp.flags = cp->flags;
9172 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9173 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9174
9175 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9176 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9177 }
9178
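/* Dispatch table for the management interface: entries are indexed by
 * opcode (0x0000 is unused) and must stay in MGMT_OP_* order. Each
 * entry names the handler, its fixed parameter size and option flags
 * such as HCI_MGMT_VAR_LEN, HCI_MGMT_NO_HDEV or HCI_MGMT_UNTRUSTED.
 *
 * A minimal userspace sketch of the request framing these handlers
 * consume (illustrative only; it assumes the little-endian mgmt header
 * from <net/bluetooth/mgmt.h> and an already open HCI control socket):
 *
 *	struct mgmt_hdr hdr;
 *
 *	hdr.opcode = cpu_to_le16(MGMT_OP_READ_VERSION);
 *	hdr.index  = cpu_to_le16(MGMT_INDEX_NONE);
 *	hdr.len    = cpu_to_le16(0);
 *	write(fd, &hdr, sizeof(hdr));
 */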
9179 static const struct hci_mgmt_handler mgmt_handlers[] = {
9180 { NULL }, /* 0x0000 (no command) */
9181 { read_version, MGMT_READ_VERSION_SIZE,
9182 HCI_MGMT_NO_HDEV |
9183 HCI_MGMT_UNTRUSTED },
9184 { read_commands, MGMT_READ_COMMANDS_SIZE,
9185 HCI_MGMT_NO_HDEV |
9186 HCI_MGMT_UNTRUSTED },
9187 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
9188 HCI_MGMT_NO_HDEV |
9189 HCI_MGMT_UNTRUSTED },
9190 { read_controller_info, MGMT_READ_INFO_SIZE,
9191 HCI_MGMT_UNTRUSTED },
9192 { set_powered, MGMT_SETTING_SIZE },
9193 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
9194 { set_connectable, MGMT_SETTING_SIZE },
9195 { set_fast_connectable, MGMT_SETTING_SIZE },
9196 { set_bondable, MGMT_SETTING_SIZE },
9197 { set_link_security, MGMT_SETTING_SIZE },
9198 { set_ssp, MGMT_SETTING_SIZE },
9199 { set_hs, MGMT_SETTING_SIZE },
9200 { set_le, MGMT_SETTING_SIZE },
9201 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
9202 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
9203 { add_uuid, MGMT_ADD_UUID_SIZE },
9204 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
9205 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
9206 HCI_MGMT_VAR_LEN },
9207 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
9208 HCI_MGMT_VAR_LEN },
9209 { disconnect, MGMT_DISCONNECT_SIZE },
9210 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
9211 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
9212 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
9213 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
9214 { pair_device, MGMT_PAIR_DEVICE_SIZE },
9215 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
9216 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
9217 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
9218 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
9219 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
9220 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
9221 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
9222 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
9223 HCI_MGMT_VAR_LEN },
9224 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
9225 { start_discovery, MGMT_START_DISCOVERY_SIZE },
9226 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
9227 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
9228 { block_device, MGMT_BLOCK_DEVICE_SIZE },
9229 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
9230 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
9231 { set_advertising, MGMT_SETTING_SIZE },
9232 { set_bredr, MGMT_SETTING_SIZE },
9233 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
9234 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
9235 { set_secure_conn, MGMT_SETTING_SIZE },
9236 { set_debug_keys, MGMT_SETTING_SIZE },
9237 { set_privacy, MGMT_SET_PRIVACY_SIZE },
9238 { load_irks, MGMT_LOAD_IRKS_SIZE,
9239 HCI_MGMT_VAR_LEN },
9240 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
9241 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
9242 { add_device, MGMT_ADD_DEVICE_SIZE },
9243 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
9244 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
9245 HCI_MGMT_VAR_LEN },
9246 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
9247 HCI_MGMT_NO_HDEV |
9248 HCI_MGMT_UNTRUSTED },
9249 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
9250 HCI_MGMT_UNCONFIGURED |
9251 HCI_MGMT_UNTRUSTED },
9252 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
9253 HCI_MGMT_UNCONFIGURED },
9254 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
9255 HCI_MGMT_UNCONFIGURED },
9256 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
9257 HCI_MGMT_VAR_LEN },
9258 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
9259 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
9260 HCI_MGMT_NO_HDEV |
9261 HCI_MGMT_UNTRUSTED },
9262 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
9263 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
9264 HCI_MGMT_VAR_LEN },
9265 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
9266 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
9267 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
9268 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
9269 HCI_MGMT_UNTRUSTED },
9270 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
9271 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
9272 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
9273 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
9274 HCI_MGMT_VAR_LEN },
9275 { set_wideband_speech, MGMT_SETTING_SIZE },
9276 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
9277 HCI_MGMT_UNTRUSTED },
9278 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
9279 HCI_MGMT_UNTRUSTED |
9280 HCI_MGMT_HDEV_OPTIONAL },
9281 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
9282 HCI_MGMT_VAR_LEN |
9283 HCI_MGMT_HDEV_OPTIONAL },
9284 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
9285 HCI_MGMT_UNTRUSTED },
9286 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
9287 HCI_MGMT_VAR_LEN },
9288 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
9289 HCI_MGMT_UNTRUSTED },
9290 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
9291 HCI_MGMT_VAR_LEN },
9292 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
9293 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
9294 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
9295 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
9296 HCI_MGMT_VAR_LEN },
9297 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
9298 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
9299 HCI_MGMT_VAR_LEN },
9300 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
9301 HCI_MGMT_VAR_LEN },
9302 { add_adv_patterns_monitor_rssi,
9303 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
9304 HCI_MGMT_VAR_LEN },
9305 { set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
9306 HCI_MGMT_VAR_LEN },
9307 { mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
9308 { mesh_send, MGMT_MESH_SEND_SIZE,
9309 HCI_MGMT_VAR_LEN },
9310 { mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
9311 { mgmt_hci_cmd_sync, MGMT_HCI_CMD_SYNC_SIZE, HCI_MGMT_VAR_LEN },
9312 };
9313
9314 void mgmt_index_added(struct hci_dev *hdev)
9315 {
9316 struct mgmt_ev_ext_index ev;
9317
9318 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9319 return;
9320
9321 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9322 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
9323 HCI_MGMT_UNCONF_INDEX_EVENTS);
9324 ev.type = 0x01;
9325 } else {
9326 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9327 HCI_MGMT_INDEX_EVENTS);
9328 ev.type = 0x00;
9329 }
9330
9331 ev.bus = hdev->bus;
9332
9333 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9334 HCI_MGMT_EXT_INDEX_EVENTS);
9335 }
9336
9337 void mgmt_index_removed(struct hci_dev *hdev)
9338 {
9339 struct mgmt_ev_ext_index ev;
9340 struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };
9341
9342 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9343 return;
9344
9345 mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);
9346
9347 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9348 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
9349 HCI_MGMT_UNCONF_INDEX_EVENTS);
9350 ev.type = 0x01;
9351 } else {
9352 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9353 HCI_MGMT_INDEX_EVENTS);
9354 ev.type = 0x00;
9355 }
9356
9357 ev.bus = hdev->bus;
9358
9359 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9360 HCI_MGMT_EXT_INDEX_EVENTS);
9361
9362 /* Cancel any remaining timed work */
9363 if (!hci_dev_test_flag(hdev, HCI_MGMT))
9364 return;
9365 cancel_delayed_work_sync(&hdev->discov_off);
9366 cancel_delayed_work_sync(&hdev->service_cache);
9367 cancel_delayed_work_sync(&hdev->rpa_expired);
9368 }

void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}

void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
			     &match);

	/* If the power off is because of hdev unregistration, use the
	 * appropriate INVALID_INDEX status. Otherwise use NOT_POWERED.
	 * We cover both scenarios here since later in mgmt_index_removed()
	 * any hci_conn callbacks will have already been triggered,
	 * potentially causing misleading DISCONNECTED status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
	else
		match.mgmt_status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct mgmt_pending_cmd *cmd;
	u8 status;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_RESPONDER:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}

	return MGMT_LTK_UNAUTHENTICATED;
}

void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't need their
	 * long term keys stored. Their addresses will change the next
	 * time around.
	 *
	 * Only when a remote device provides an identity address should
	 * the long term key be stored. If the remote identity is known,
	 * the long term keys are internally mapped to the identity
	 * address. So allow static random and public addresses here.
	 */
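	/* Static random addresses have the two most significant bits set
	 * (0b11); bdaddr_t is stored little-endian, so b[5] holds the most
	 * significant byte, hence the 0xc0 mask below.
	 */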
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't need their
	 * signature resolving keys stored. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address should
	 * the signature resolving key be stored. So allow static random
	 * and public addresses here.
	 */
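	/* Same static-random-address check as in mgmt_new_ltk() above. */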
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

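	/* The values follow the HCI connection parameter encoding: units
	 * of 1.25 ms for the intervals, connection events for the latency
	 * and 10 ms for the supervision timeout.
	 */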
	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

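	/* Note: eir_precalc_len() below accounts for the two-byte field
	 * header (length + type) in addition to the payload itself.
	 */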
	/* Allocate a buffer large enough for either the LE advertising
	 * data or the BR/EDR EIR fields.
	 */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) +
				     (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE,
						    name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class,
						    sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}

static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
}

bool mgmt_powering_down(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
		return true;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return false;

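	/* A pending Set Powered command with val == 0 means a power-down
	 * is in progress even though HCI_POWERING_DOWN is not yet set.
	 */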
	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, true,
			     unpair_device_rsp, hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn,
			 u8 status)
{
	struct mgmt_ev_connect_failed ev;

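	/* If the connection had already been reported as established via
	 * Device Connected, emit Device Disconnected instead of Connect
	 * Failed so that userspace connection state stays consistent.
	 */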
	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
					 conn->dst_type, status, true);
		return;
	}

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, false, sk_lookup,
			     &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, false, sk_lookup,
			     &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, false, sk_lookup,
			     &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS,
				   NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is an HCI command related to powering the
		 * HCI dev on or off, don't send any mgmt signals.
		 */
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
			return;

		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}

static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}

static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

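		/* 16-bit and 32-bit UUIDs are expanded into the 128-bit
		 * Bluetooth base UUID; both are encoded little-endian in
		 * EIR, so their bytes land at offsets 12-15 of the value.
		 */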
		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}

static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If an RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results
	 * with an RSSI smaller than the threshold will be dropped. If the
	 * quirk is set, let the result through for further processing, as
	 * we might need to restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in the filter, results
		 * with no matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated results with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		/* Validate the RSSI value against the threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}

void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.monitor_handle = cpu_to_le16(handle);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}

static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to the DEVICE_FOUND event
	 * except that it also carries a 'monitor_handle'. Make a copy of
	 * DEVICE_FOUND and store the monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}

static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitors
	 *
	 * For cases 1 and 2, report all advertisements via
	 * MGMT_EV_DEVICE_FOUND and report ONLY one advertisement per device
	 * for the matched Monitor via MGMT_EV_ADV_MONITOR_DEVICE_FOUND.
	 *
	 * For case 3, since we are not active scanning and all
	 * advertisements received are due to a matched Advertisement
	 * Monitor, report all advertisements ONLY via
	 * MGMT_EV_ADV_MONITOR_DEVICE_FOUND.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and
		 * this is a subsequent advertisement report for an already
		 * matched Advertisement Monitor or the controller offloading
		 * support is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}

static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
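
/* Editorial sketch (not part of the original file): EIR/advertising data
 * is a sequence of length-prefixed { len, type, data[len - 1] } fields,
 * which is what the loops in mesh_device_found() and eir_has_uuids()
 * walk with i += eir[i] + 1. The helper name below is hypothetical; the
 * in-tree equivalent is eir_get_data() from eir.h.
 */
#if 0
static const u8 *eir_find_type(const u8 *eir, u16 eir_len, u8 type)
{
	u16 i;

	for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
		if (eir[i] == 0)	/* a zero-length field terminates */
			break;

		if (eir[i + 1] == type)
			return &eir[i + 2];
	}

	return NULL;
}
#endif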

void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel-initiated discovery. With
	 * LE the one exception is if we have pend_le_reports > 0, in
	 * which case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for the limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);

			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate the skb. The 5 extra bytes are for the potential CoD
	 * field (see below).
	 */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

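		/* The CoD field is one length byte, one type byte and three
		 * bytes of Class of Device data, hence the five extra bytes
		 * reserved when the skb was allocated.
		 */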
		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}

void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) +
			     (name ? eir_precalc_len(name_len) : 0));
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name,
					    name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}

void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	} else {
		memset(&ev.addr, 0, sizeof(ev.addr));
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}

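/* All management opcodes are dispatched through the HCI control channel;
 * registering this hci_mgmt_chan makes the handler table above visible
 * to the HCI socket layer.
 */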
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};

int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}

void mgmt_cleanup(struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, true);
		} while (mesh_tx);
	}

	read_unlock(&hci_dev_list_lock);
}