/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010 Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <linux/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
#include "aosp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	23

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
	MGMT_OP_HCI_CMD_SYNC,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};

static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};

#define CACHE_TIMEOUT	secs_to_jiffies(2)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

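/* Map a Unix errno onto the closest MGMT status code; anything not
 * listed here is reported as MGMT_STATUS_FAILED.
 */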
static u8 mgmt_errno_status(int err)
{
	switch (err) {
	case 0:
		return MGMT_STATUS_SUCCESS;
	case -EPERM:
		return MGMT_STATUS_REJECTED;
	case -EINVAL:
		return MGMT_STATUS_INVALID_PARAMS;
	case -EOPNOTSUPP:
		return MGMT_STATUS_NOT_SUPPORTED;
	case -EBUSY:
		return MGMT_STATUS_BUSY;
	case -ETIMEDOUT:
		return MGMT_STATUS_AUTH_FAILED;
	case -ENOMEM:
		return MGMT_STATUS_NO_RESOURCES;
	case -EISCONN:
		return MGMT_STATUS_ALREADY_CONNECTED;
	case -ENOTCONN:
		return MGMT_STATUS_DISCONNECTED;
	}

	return MGMT_STATUS_FAILED;
}

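/* Convert an error into an MGMT status: negative values are treated as
 * errnos, non-negative values as HCI status codes looked up in
 * mgmt_status_table above.
 */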
static u8 mgmt_status(int err)
{
	if (err < 0)
		return mgmt_errno_status(err);

	if (err < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[err];

	return MGMT_STATUS_FAILED;
}

static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}

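/* Translate an MGMT address type into the HCI LE address type used by
 * the core; anything other than public is treated as random.
 */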
static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;
	else
		return ADDR_LE_DEV_RANDOM;
}

void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}

static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}

static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}

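/* A controller counts as configured once any required external
 * configuration has completed and a valid public address is available.
 */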
static bool is_configured(struct hci_dev *hdev)
{
	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if ((hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) ||
	     hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) ||
	     hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}

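/* Build the bitmask of PHYs the controller supports, based on the
 * BR/EDR LMP features and the LE feature bits.
 */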
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}

static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}

static u32 get_configurable_phys(struct hci_dev *hdev)
{
	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}

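/* Build the bitmask of settings this controller can support at all;
 * get_current_settings() below reports which of them are active.
 */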
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev))
			settings |= MGMT_SETTING_SSP;

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (hci_test_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) || hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	if (ll_privacy_capable(hdev))
		settings |= MGMT_SETTING_LL_PRIVACY;

	if (past_sender_capable(hdev))
		settings |= MGMT_SETTING_PAST_SENDER;

	if (past_receiver_capable(hdev))
		settings |= MGMT_SETTING_PAST_RECEIVER;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for the static address serves two purposes:
	 * it indicates whether the static address will be used, and it
	 * indicates whether it is actually set.
	 *
	 * This means that if the static address is not configured, this
	 * flag will never be set. If the address is configured, then
	 * whether the address is actually in use decides if the flag is
	 * set or not.
	 *
	 * For single-mode LE-only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address is
	 * what gets evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_enabled(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_enabled(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_enabled(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_enabled(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	if (ll_privacy_enabled(hdev))
		settings |= MGMT_SETTING_LL_PRIVACY;

	if (past_sender_enabled(hdev))
		settings |= MGMT_SETTING_PAST_SENDER;

	if (past_receiver_enabled(hdev))
		settings |= MGMT_SETTING_PAST_RECEIVER;

	return settings;
}

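/* Look up a pending mgmt command for this controller on the control
 * channel.
 */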
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}

bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}

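/* Flush the cached service information by re-synchronizing the EIR
 * data and class of device with the controller.
 */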
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}

static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}

static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_start_ext_adv_sync() and
	 * hci_enable_advertising_sync() calls below.
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}

static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data);

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);

static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}

static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	if (list_empty(&hdev->adv_instances))
		hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}

static int mesh_send_sync(struct hci_dev *hdev, void *data);
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);

static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}

static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}

static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set implicitly
	 * so that pairing works for them. However, for mgmt we
	 * require user-space to explicitly enable it.
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}

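/* Append class of device, appearance and the complete/short local name
 * to an EIR buffer, depending on which transports are enabled, and
 * return the resulting length.
 */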
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}

static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}

static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}

void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

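/* Cancel any advertising instance timeout that is still pending. */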
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for the AUTO_OFF case where the controller might
		 * not "really" have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}

static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_free(cmd);
}

static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode cp;

	mutex_lock(&hdev->mgmt_pending_lock);

	/* Make sure cmd still outstanding. */
	if (!__mgmt_pending_listed(hdev, cmd)) {
		mutex_unlock(&hdev->mgmt_pending_lock);
		return -ECANCELED;
	}

	memcpy(&cp, cmd->param, sizeof(cp));

	mutex_unlock(&hdev->mgmt_pending_lock);

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp.val);
}

static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!cp->val) {
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
					      MGMT_STATUS_BUSY);
			goto failed;
		}
	}

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, *status);
}

static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	/* dequeue cmd_sync entries using cmd as data as that is about to be
	 * removed/freed.
	 */
	hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);

	if (cmd->cmd_complete) {
		cmd->cmd_complete(cmd, match->mgmt_status);
		return;
	}

	cmd_status_rsp(cmd, data);
}

static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}

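/* Helpers reporting whether BR/EDR respectively LE can be used right
 * now: not supported by the controller, currently disabled, or OK.
 */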
static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = secs_to_jiffies(hdev->discov_timeout);

		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
	hci_dev_unlock(hdev);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	if (!mgmt_pending_listed(hdev, data))
		return -ECANCELED;

	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}

static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, just update the timeout value
	 * with the new one. When only the timeout gets updated, no HCI
	 * transactions are needed at all.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = secs_to_jiffies(hdev->discov_timeout);

			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}

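/* Update the connectable setting while the controller is powered off:
 * only the flags need to change, no HCI traffic is required.
 */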
set_connectable_update_settings(struct hci_dev * hdev,struct sock * sk,u8 val)1739 static int set_connectable_update_settings(struct hci_dev *hdev,
1740 struct sock *sk, u8 val)
1741 {
1742 bool changed = false;
1743 int err;
1744
1745 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1746 changed = true;
1747
1748 if (val) {
1749 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1750 } else {
1751 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1752 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1753 }
1754
1755 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1756 if (err < 0)
1757 return err;
1758
1759 if (changed) {
1760 hci_update_scan(hdev);
1761 hci_update_passive_scan(hdev);
1762 return new_settings(hdev, sk);
1763 }
1764
1765 return 0;
1766 }
1767
set_connectable_sync(struct hci_dev * hdev,void * data)1768 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1769 {
1770 if (!mgmt_pending_listed(hdev, data))
1771 return -ECANCELED;
1772
1773 BT_DBG("%s", hdev->name);
1774
1775 return hci_update_connectable_sync(hdev);
1776 }
1777
set_connectable(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)1778 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1779 u16 len)
1780 {
1781 struct mgmt_mode *cp = data;
1782 struct mgmt_pending_cmd *cmd;
1783 int err;
1784
1785 bt_dev_dbg(hdev, "sock %p", sk);
1786
1787 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1788 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1789 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1790 MGMT_STATUS_REJECTED);
1791
1792 if (cp->val != 0x00 && cp->val != 0x01)
1793 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1794 MGMT_STATUS_INVALID_PARAMS);
1795
1796 hci_dev_lock(hdev);
1797
1798 if (!hdev_is_powered(hdev)) {
1799 err = set_connectable_update_settings(hdev, sk, cp->val);
1800 goto failed;
1801 }
1802
1803 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1804 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1805 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1806 MGMT_STATUS_BUSY);
1807 goto failed;
1808 }
1809
1810 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1811 if (!cmd) {
1812 err = -ENOMEM;
1813 goto failed;
1814 }
1815
1816 if (cp->val) {
1817 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1818 } else {
1819 if (hdev->discov_timeout > 0)
1820 cancel_delayed_work(&hdev->discov_off);
1821
1822 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1823 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1824 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1825 }
1826
1827 err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1828 mgmt_set_connectable_complete);
1829
1830 if (err < 0)
1831 mgmt_pending_remove(cmd);
1832
1833 failed:
1834 hci_dev_unlock(hdev);
1835 return err;
1836 }
1837
1838 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1839 u16 len)
1840 {
1841 struct mgmt_mode *cp = data;
1842 bool changed;
1843 int err;
1844
1845 bt_dev_dbg(hdev, "sock %p", sk);
1846
1847 if (cp->val != 0x00 && cp->val != 0x01)
1848 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1849 MGMT_STATUS_INVALID_PARAMS);
1850
1851 hci_dev_lock(hdev);
1852
1853 if (cp->val)
1854 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1855 else
1856 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1857
1858 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1859 if (err < 0)
1860 goto unlock;
1861
1862 if (changed) {
1863 /* In limited privacy mode the change of bondable mode
1864 * may affect the local advertising address.
1865 */
1866 hci_update_discoverable(hdev);
1867
1868 err = new_settings(hdev, sk);
1869 }
1870
1871 unlock:
1872 hci_dev_unlock(hdev);
1873 return err;
1874 }
1875
1876 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1877 u16 len)
1878 {
1879 struct mgmt_mode *cp = data;
1880 struct mgmt_pending_cmd *cmd;
1881 u8 val, status;
1882 int err;
1883
1884 bt_dev_dbg(hdev, "sock %p", sk);
1885
1886 status = mgmt_bredr_support(hdev);
1887 if (status)
1888 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1889 status);
1890
1891 if (cp->val != 0x00 && cp->val != 0x01)
1892 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1893 MGMT_STATUS_INVALID_PARAMS);
1894
1895 hci_dev_lock(hdev);
1896
1897 if (!hdev_is_powered(hdev)) {
1898 bool changed = false;
1899
1900 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1901 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1902 changed = true;
1903 }
1904
1905 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1906 if (err < 0)
1907 goto failed;
1908
1909 if (changed)
1910 err = new_settings(hdev, sk);
1911
1912 goto failed;
1913 }
1914
1915 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1916 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1917 MGMT_STATUS_BUSY);
1918 goto failed;
1919 }
1920
1921 val = !!cp->val;
1922
1923 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1924 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1925 goto failed;
1926 }
1927
1928 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1929 if (!cmd) {
1930 err = -ENOMEM;
1931 goto failed;
1932 }
1933
1934 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1935 if (err < 0) {
1936 mgmt_pending_remove(cmd);
1937 goto failed;
1938 }
1939
1940 failed:
1941 hci_dev_unlock(hdev);
1942 return err;
1943 }
1944
1945 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1946 {
1947 struct cmd_lookup match = { NULL, hdev };
1948 struct mgmt_pending_cmd *cmd = data;
1949 struct mgmt_mode *cp;
1950 u8 enable;
1951 bool changed;
1952
1953 /* Make sure cmd still outstanding. */
1954 if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
1955 return;
1956
1957 cp = cmd->param;
1958 enable = cp->val;
1959
1960 if (err) {
1961 u8 mgmt_err = mgmt_status(err);
1962
1963 if (enable && hci_dev_test_and_clear_flag(hdev,
1964 HCI_SSP_ENABLED)) {
1965 new_settings(hdev, NULL);
1966 }
1967
1968 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
1969 return;
1970 }
1971
1972 if (enable) {
1973 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1974 } else {
1975 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1976 }
1977
1978 settings_rsp(cmd, &match);
1979
1980 if (changed)
1981 new_settings(hdev, match.sk);
1982
1983 if (match.sk)
1984 sock_put(match.sk);
1985
1986 hci_update_eir_sync(hdev);
1987 }
1988
1989 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1990 {
1991 struct mgmt_pending_cmd *cmd = data;
1992 struct mgmt_mode cp;
1993 bool changed = false;
1994 int err;
1995
1996 mutex_lock(&hdev->mgmt_pending_lock);
1997
1998 if (!__mgmt_pending_listed(hdev, cmd)) {
1999 mutex_unlock(&hdev->mgmt_pending_lock);
2000 return -ECANCELED;
2001 }
2002
2003 memcpy(&cp, cmd->param, sizeof(cp));
2004
2005 mutex_unlock(&hdev->mgmt_pending_lock);
2006
2007 if (cp.val)
2008 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
2009
2010 err = hci_write_ssp_mode_sync(hdev, cp.val);
2011
2012 if (!err && changed)
2013 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
2014
2015 return err;
2016 }
2017
2018 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2019 {
2020 struct mgmt_mode *cp = data;
2021 struct mgmt_pending_cmd *cmd;
2022 u8 status;
2023 int err;
2024
2025 bt_dev_dbg(hdev, "sock %p", sk);
2026
2027 status = mgmt_bredr_support(hdev);
2028 if (status)
2029 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2030
2031 if (!lmp_ssp_capable(hdev))
2032 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2033 MGMT_STATUS_NOT_SUPPORTED);
2034
2035 if (cp->val != 0x00 && cp->val != 0x01)
2036 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2037 MGMT_STATUS_INVALID_PARAMS);
2038
2039 hci_dev_lock(hdev);
2040
2041 if (!hdev_is_powered(hdev)) {
2042 bool changed;
2043
2044 if (cp->val) {
2045 changed = !hci_dev_test_and_set_flag(hdev,
2046 HCI_SSP_ENABLED);
2047 } else {
2048 changed = hci_dev_test_and_clear_flag(hdev,
2049 HCI_SSP_ENABLED);
2050 }
2051
2052 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2053 if (err < 0)
2054 goto failed;
2055
2056 if (changed)
2057 err = new_settings(hdev, sk);
2058
2059 goto failed;
2060 }
2061
2062 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2063 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2064 MGMT_STATUS_BUSY);
2065 goto failed;
2066 }
2067
2068 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2069 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2070 goto failed;
2071 }
2072
2073 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2074 if (!cmd)
2075 err = -ENOMEM;
2076 else
2077 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2078 set_ssp_complete);
2079
2080 if (err < 0) {
2081 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2082 MGMT_STATUS_FAILED);
2083
2084 if (cmd)
2085 mgmt_pending_remove(cmd);
2086 }
2087
2088 failed:
2089 hci_dev_unlock(hdev);
2090 return err;
2091 }
2092
2093 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2094 {
2095 bt_dev_dbg(hdev, "sock %p", sk);
2096
2097 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2098 MGMT_STATUS_NOT_SUPPORTED);
2099 }
2100
2101 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2102 {
2103 struct mgmt_pending_cmd *cmd = data;
2104 struct cmd_lookup match = { NULL, hdev };
2105 u8 status = mgmt_status(err);
2106
2107 bt_dev_dbg(hdev, "err %d", err);
2108
2109 if (err == -ECANCELED || !mgmt_pending_valid(hdev, data))
2110 return;
2111
2112 if (status) {
2113 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, status);
2114 goto done;
2115 }
2116
2117 settings_rsp(cmd, &match);
2118
2119 new_settings(hdev, match.sk);
2120
2121 if (match.sk)
2122 sock_put(match.sk);
2123
2124 done:
2125 mgmt_pending_free(cmd);
2126 }
2127
2128 static int set_le_sync(struct hci_dev *hdev, void *data)
2129 {
2130 struct mgmt_pending_cmd *cmd = data;
2131 struct mgmt_mode cp;
2132 u8 val;
2133 int err;
2134
2135 mutex_lock(&hdev->mgmt_pending_lock);
2136
2137 if (!__mgmt_pending_listed(hdev, cmd)) {
2138 mutex_unlock(&hdev->mgmt_pending_lock);
2139 return -ECANCELED;
2140 }
2141
2142 memcpy(&cp, cmd->param, sizeof(cp));
2143 val = !!cp.val;
2144
2145 mutex_unlock(&hdev->mgmt_pending_lock);
2146
2147 if (!val) {
2148 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2149
2150 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2151 hci_disable_advertising_sync(hdev);
2152
2153 if (ext_adv_capable(hdev))
2154 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2155 } else {
2156 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2157 }
2158
2159 err = hci_write_le_host_supported_sync(hdev, val, 0);
2160
2161 /* Make sure the controller has a good default for
2162 * advertising data. Restrict the update to when LE
2163 * has actually been enabled. During power on, the
2164 * update in powered_update_hci will take care of it.
2165 */
2166 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2167 if (ext_adv_capable(hdev)) {
2168 int status;
2169
2170 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2171 if (!status)
2172 hci_update_scan_rsp_data_sync(hdev, 0x00);
2173 } else {
2174 hci_update_adv_data_sync(hdev, 0x00);
2175 hci_update_scan_rsp_data_sync(hdev, 0x00);
2176 }
2177
2178 hci_update_passive_scan(hdev);
2179 }
2180
2181 return err;
2182 }
2183
2184 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2185 {
2186 struct mgmt_pending_cmd *cmd = data;
2187 u8 status = mgmt_status(err);
2188 struct sock *sk;
2189
2190 if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
2191 return;
2192
2193 sk = cmd->sk;
2194
2195 if (status) {
2196 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2197 status);
2198 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, true,
2199 cmd_status_rsp, &status);
2200 goto done;
2201 }
2202
2203 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2204
2205 done:
2206 mgmt_pending_free(cmd);
2207 }
2208
2209 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2210 {
2211 struct mgmt_pending_cmd *cmd = data;
2212 DEFINE_FLEX(struct mgmt_cp_set_mesh, cp, ad_types, num_ad_types,
2213 sizeof(hdev->mesh_ad_types));
2214 size_t len;
2215
2216 mutex_lock(&hdev->mgmt_pending_lock);
2217
2218 if (!__mgmt_pending_listed(hdev, cmd)) {
2219 mutex_unlock(&hdev->mgmt_pending_lock);
2220 return -ECANCELED;
2221 }
2222
2223 len = cmd->param_len;
2224 memcpy(cp, cmd->param, min(__struct_size(cp), len));
2225
2226 mutex_unlock(&hdev->mgmt_pending_lock);
2227
2228 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2229
2230 if (cp->enable)
2231 hci_dev_set_flag(hdev, HCI_MESH);
2232 else
2233 hci_dev_clear_flag(hdev, HCI_MESH);
2234
2235 hdev->le_scan_interval = __le16_to_cpu(cp->period);
2236 hdev->le_scan_window = __le16_to_cpu(cp->window);
2237
2238 len -= sizeof(struct mgmt_cp_set_mesh);
2239
2240 /* If the filters don't fit, leave the list zeroed and forward all adv packets */
2241 if (len <= sizeof(hdev->mesh_ad_types))
2242 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2243
2244 hci_update_passive_scan_sync(hdev);
2245 return 0;
2246 }
2247
2248 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2249 {
2250 struct mgmt_cp_set_mesh *cp = data;
2251 struct mgmt_pending_cmd *cmd;
2252 __u16 period, window;
2253 int err = 0;
2254
2255 bt_dev_dbg(hdev, "sock %p", sk);
2256
2257 if (!lmp_le_capable(hdev) ||
2258 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2259 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2260 MGMT_STATUS_NOT_SUPPORTED);
2261
2262 if (cp->enable != 0x00 && cp->enable != 0x01)
2263 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2264 MGMT_STATUS_INVALID_PARAMS);
2265
2266 /* Keep allowed ranges in sync with set_scan_params() */
2267 period = __le16_to_cpu(cp->period);
2268
2269 if (period < 0x0004 || period > 0x4000)
2270 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2271 MGMT_STATUS_INVALID_PARAMS);
2272
2273 window = __le16_to_cpu(cp->window);
2274
2275 if (window < 0x0004 || window > 0x4000)
2276 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2277 MGMT_STATUS_INVALID_PARAMS);
2278
2279 if (window > period)
2280 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2281 MGMT_STATUS_INVALID_PARAMS);
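/* For illustration: window and period are in units of 0.625 ms (an
 * assumption based on the standard HCI LE scan parameter encoding), so
 * the accepted range 0x0004-0x4000 corresponds to 2.5 ms - 10.24 s. For
 * example, a period of 0x0640 (1 s) with a window of 0x0190 (250 ms)
 * would scan 25% of the time.
 */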
2282
2283 hci_dev_lock(hdev);
2284
2285 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2286 if (!cmd)
2287 err = -ENOMEM;
2288 else
2289 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2290 set_mesh_complete);
2291
2292 if (err < 0) {
2293 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2294 MGMT_STATUS_FAILED);
2295
2296 if (cmd)
2297 mgmt_pending_remove(cmd);
2298 }
2299
2300 hci_dev_unlock(hdev);
2301 return err;
2302 }
2303
2304 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2305 {
2306 struct mgmt_mesh_tx *mesh_tx = data;
2307 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2308 unsigned long mesh_send_interval;
2309 u8 mgmt_err = mgmt_status(err);
2310
2311 /* Report any errors here, but don't report completion */
2312
2313 if (mgmt_err) {
2314 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2315 /* Send Complete Error Code for handle */
2316 mesh_send_complete(hdev, mesh_tx, false);
2317 return;
2318 }
2319
2320 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2321 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2322 mesh_send_interval);
2323 }
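/* For illustration: with the 25 ms-per-transmission budget used above, a
 * request with send->cnt == 3 schedules the mesh_send_done work roughly
 * 75 ms out, once all three advertising transmissions have had a chance
 * to go out.
 */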
2324
2325 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2326 {
2327 struct mgmt_mesh_tx *mesh_tx = data;
2328 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2329 struct adv_info *adv, *next_instance;
2330 u8 instance = hdev->le_num_of_adv_sets + 1;
2331 u16 timeout, duration;
2332 int err = 0;
2333
2334 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2335 return MGMT_STATUS_BUSY;
2336
2337 timeout = 1000;
2338 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2339 adv = hci_add_adv_instance(hdev, instance, 0,
2340 send->adv_data_len, send->adv_data,
2341 0, NULL,
2342 timeout, duration,
2343 HCI_ADV_TX_POWER_NO_PREFERENCE,
2344 hdev->le_adv_min_interval,
2345 hdev->le_adv_max_interval,
2346 mesh_tx->handle);
2347
2348 if (!IS_ERR(adv))
2349 mesh_tx->instance = instance;
2350 else
2351 err = PTR_ERR(adv);
2352
2353 if (hdev->cur_adv_instance == instance) {
2354 /* If the currently advertised instance is being changed then
2355 * cancel the current advertising and schedule the next
2356 * instance. If there is only one instance then the overridden
2357 * advertising data will be visible right away.
2358 */
2359 cancel_adv_timeout(hdev);
2360
2361 next_instance = hci_get_next_instance(hdev, instance);
2362 if (next_instance)
2363 instance = next_instance->instance;
2364 else
2365 instance = 0;
2366 } else if (hdev->adv_instance_timeout) {
2367 /* Immediately advertise the new instance if no other is active, or
2368 * let it be scheduled naturally from the queue if advertising is already happening
2369 */
2370 instance = 0;
2371 }
2372
2373 if (instance)
2374 return hci_schedule_adv_instance_sync(hdev, instance, true);
2375
2376 return err;
2377 }
2378
2379 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2380 {
2381 struct mgmt_rp_mesh_read_features *rp = data;
2382
2383 if (rp->used_handles >= rp->max_handles)
2384 return;
2385
2386 rp->handles[rp->used_handles++] = mesh_tx->handle;
2387 }
2388
2389 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2390 void *data, u16 len)
2391 {
2392 struct mgmt_rp_mesh_read_features rp;
2393
2394 if (!lmp_le_capable(hdev) ||
2395 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2396 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2397 MGMT_STATUS_NOT_SUPPORTED);
2398
2399 memset(&rp, 0, sizeof(rp));
2400 rp.index = cpu_to_le16(hdev->id);
2401 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2402 rp.max_handles = MESH_HANDLES_MAX;
2403
2404 hci_dev_lock(hdev);
2405
2406 if (rp.max_handles)
2407 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2408
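/* The reply below is truncated to the handles actually in use: each
 * handle is one byte, so used_handles + sizeof(rp) - MESH_HANDLES_MAX
 * drops the unused tail of the handles array.
 */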
2409 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2410 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2411
2412 hci_dev_unlock(hdev);
2413 return 0;
2414 }
2415
2416 static int send_cancel(struct hci_dev *hdev, void *data)
2417 {
2418 struct mgmt_pending_cmd *cmd = data;
2419 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2420 struct mgmt_mesh_tx *mesh_tx;
2421
2422 if (!cancel->handle) {
2423 do {
2424 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2425
2426 if (mesh_tx)
2427 mesh_send_complete(hdev, mesh_tx, false);
2428 } while (mesh_tx);
2429 } else {
2430 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2431
2432 if (mesh_tx && mesh_tx->sk == cmd->sk)
2433 mesh_send_complete(hdev, mesh_tx, false);
2434 }
2435
2436 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2437 0, NULL, 0);
2438 mgmt_pending_free(cmd);
2439
2440 return 0;
2441 }
2442
2443 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2444 void *data, u16 len)
2445 {
2446 struct mgmt_pending_cmd *cmd;
2447 int err;
2448
2449 if (!lmp_le_capable(hdev) ||
2450 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2451 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2452 MGMT_STATUS_NOT_SUPPORTED);
2453
2454 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2455 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2456 MGMT_STATUS_REJECTED);
2457
2458 hci_dev_lock(hdev);
2459 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2460 if (!cmd)
2461 err = -ENOMEM;
2462 else
2463 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2464
2465 if (err < 0) {
2466 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2467 MGMT_STATUS_FAILED);
2468
2469 if (cmd)
2470 mgmt_pending_free(cmd);
2471 }
2472
2473 hci_dev_unlock(hdev);
2474 return err;
2475 }
2476
2477 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2478 {
2479 struct mgmt_mesh_tx *mesh_tx;
2480 struct mgmt_cp_mesh_send *send = data;
2481 struct mgmt_rp_mesh_read_features rp;
2482 bool sending;
2483 int err = 0;
2484
2485 if (!lmp_le_capable(hdev) ||
2486 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2487 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2488 MGMT_STATUS_NOT_SUPPORTED);
2489 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2490 len <= MGMT_MESH_SEND_SIZE ||
2491 len > (MGMT_MESH_SEND_SIZE + 31))
2492 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2493 MGMT_STATUS_REJECTED);
2494
2495 hci_dev_lock(hdev);
2496
2497 memset(&rp, 0, sizeof(rp));
2498 rp.max_handles = MESH_HANDLES_MAX;
2499
2500 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2501
2502 if (rp.max_handles <= rp.used_handles) {
2503 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2504 MGMT_STATUS_BUSY);
2505 goto done;
2506 }
2507
2508 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2509 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2510
2511 if (!mesh_tx)
2512 err = -ENOMEM;
2513 else if (!sending)
2514 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2515 mesh_send_start_complete);
2516
2517 if (err < 0) {
2518 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2519 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2520 MGMT_STATUS_FAILED);
2521
2522 if (mesh_tx) {
2523 if (sending)
2524 mgmt_mesh_remove(mesh_tx);
2525 }
2526 } else {
2527 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2528
2529 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2530 &mesh_tx->handle, 1);
2531 }
2532
2533 done:
2534 hci_dev_unlock(hdev);
2535 return err;
2536 }
2537
2538 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2539 {
2540 struct mgmt_mode *cp = data;
2541 struct mgmt_pending_cmd *cmd;
2542 int err;
2543 u8 val, enabled;
2544
2545 bt_dev_dbg(hdev, "sock %p", sk);
2546
2547 if (!lmp_le_capable(hdev))
2548 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2549 MGMT_STATUS_NOT_SUPPORTED);
2550
2551 if (cp->val != 0x00 && cp->val != 0x01)
2552 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2553 MGMT_STATUS_INVALID_PARAMS);
2554
2555 /* Single-mode LE-only controllers, or dual-mode controllers
2556 * configured as LE-only devices, do not allow switching LE off.
2557 * These either have LE enabled explicitly or have had BR/EDR
2558 * previously switched off.
2559 *
2560 * When trying to enable LE that is already enabled, gracefully
2561 * send a positive response. Trying to disable it, however,
2562 * results in rejection.
2563 */
2564 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2565 if (cp->val == 0x01)
2566 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2567
2568 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2569 MGMT_STATUS_REJECTED);
2570 }
2571
2572 hci_dev_lock(hdev);
2573
2574 val = !!cp->val;
2575 enabled = lmp_host_le_capable(hdev);
2576
2577 if (!hdev_is_powered(hdev) || val == enabled) {
2578 bool changed = false;
2579
2580 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2581 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2582 changed = true;
2583 }
2584
2585 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2586 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2587 changed = true;
2588 }
2589
2590 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2591 if (err < 0)
2592 goto unlock;
2593
2594 if (changed)
2595 err = new_settings(hdev, sk);
2596
2597 goto unlock;
2598 }
2599
2600 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2601 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2602 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2603 MGMT_STATUS_BUSY);
2604 goto unlock;
2605 }
2606
2607 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2608 if (!cmd)
2609 err = -ENOMEM;
2610 else
2611 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2612 set_le_complete);
2613
2614 if (err < 0) {
2615 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2616 MGMT_STATUS_FAILED);
2617
2618 if (cmd)
2619 mgmt_pending_remove(cmd);
2620 }
2621
2622 unlock:
2623 hci_dev_unlock(hdev);
2624 return err;
2625 }
2626
2627 static int send_hci_cmd_sync(struct hci_dev *hdev, void *data)
2628 {
2629 struct mgmt_pending_cmd *cmd = data;
2630 struct mgmt_cp_hci_cmd_sync *cp = cmd->param;
2631 struct sk_buff *skb;
2632
2633 skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cp->opcode),
2634 le16_to_cpu(cp->params_len), cp->params,
2635 cp->event, cp->timeout ?
2636 secs_to_jiffies(cp->timeout) :
2637 HCI_CMD_TIMEOUT);
2638 if (IS_ERR(skb)) {
2639 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2640 mgmt_status(PTR_ERR(skb)));
2641 goto done;
2642 }
2643
2644 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC, 0,
2645 skb->data, skb->len);
2646
2647 kfree_skb(skb);
2648
2649 done:
2650 mgmt_pending_free(cmd);
2651
2652 return 0;
2653 }
2654
2655 static int mgmt_hci_cmd_sync(struct sock *sk, struct hci_dev *hdev,
2656 void *data, u16 len)
2657 {
2658 struct mgmt_cp_hci_cmd_sync *cp = data;
2659 struct mgmt_pending_cmd *cmd;
2660 int err;
2661
2662 if (len != (offsetof(struct mgmt_cp_hci_cmd_sync, params) +
2663 le16_to_cpu(cp->params_len)))
2664 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2665 MGMT_STATUS_INVALID_PARAMS);
2666
2667 hci_dev_lock(hdev);
2668 cmd = mgmt_pending_new(sk, MGMT_OP_HCI_CMD_SYNC, hdev, data, len);
2669 if (!cmd)
2670 err = -ENOMEM;
2671 else
2672 err = hci_cmd_sync_queue(hdev, send_hci_cmd_sync, cmd, NULL);
2673
2674 if (err < 0) {
2675 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2676 MGMT_STATUS_FAILED);
2677
2678 if (cmd)
2679 mgmt_pending_free(cmd);
2680 }
2681
2682 hci_dev_unlock(hdev);
2683 return err;
2684 }
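/* A hypothetical use of MGMT_OP_HCI_CMD_SYNC: a request whose
 * mgmt_cp_hci_cmd_sync carries opcode 0x1001 (HCI Read Local Version
 * Information), params_len 0 and timeout 0 would run that command with
 * the default HCI_CMD_TIMEOUT and return the event parameters as the
 * command's reply.
 */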
2685
2686 /* This is a helper function to test for pending mgmt commands that can
2687 * cause CoD or EIR HCI commands. Only one such pending mgmt command is
2688 * allowed at a time since otherwise we cannot easily track what the
2689 * current values are and will be, nor calculate from that whether a new
2690 * HCI command needs to be sent and, if so, with what value.
2691 */
2692 static bool pending_eir_or_class(struct hci_dev *hdev)
2693 {
2694 struct mgmt_pending_cmd *cmd;
2695
2696 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2697 switch (cmd->opcode) {
2698 case MGMT_OP_ADD_UUID:
2699 case MGMT_OP_REMOVE_UUID:
2700 case MGMT_OP_SET_DEV_CLASS:
2701 case MGMT_OP_SET_POWERED:
2702 return true;
2703 }
2704 }
2705
2706 return false;
2707 }
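/* For example, an Add UUID request that arrives while a Set Device Class
 * command is still pending is rejected with MGMT_STATUS_BUSY by
 * add_uuid() below, based on this check.
 */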
2708
2709 static const u8 bluetooth_base_uuid[] = {
2710 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2711 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2712 };
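/* get_uuid_size() below detects shortened UUIDs: any UUID of the form
 * xxxxxxxx-0000-1000-8000-00805f9b34fb shares its last 12 bytes (the
 * first 12 bytes in the little-endian storage used here) with the
 * Bluetooth Base UUID above. For example, the 16-bit Audio Sink UUID
 * 0x110b expands to 0000110b-0000-1000-8000-00805f9b34fb, so the 32-bit
 * value read from offset 12 is 0x0000110b, which is <= 0xffff and is
 * therefore reported as a 16-bit UUID.
 */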
2713
2714 static u8 get_uuid_size(const u8 *uuid)
2715 {
2716 u32 val;
2717
2718 if (memcmp(uuid, bluetooth_base_uuid, 12))
2719 return 128;
2720
2721 val = get_unaligned_le32(&uuid[12]);
2722 if (val > 0xffff)
2723 return 32;
2724
2725 return 16;
2726 }
2727
2728 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2729 {
2730 struct mgmt_pending_cmd *cmd = data;
2731
2732 bt_dev_dbg(hdev, "err %d", err);
2733
2734 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
2735 mgmt_status(err), hdev->dev_class, 3);
2736
2737 mgmt_pending_free(cmd);
2738 }
2739
2740 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2741 {
2742 int err;
2743
2744 err = hci_update_class_sync(hdev);
2745 if (err)
2746 return err;
2747
2748 return hci_update_eir_sync(hdev);
2749 }
2750
2751 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2752 {
2753 struct mgmt_cp_add_uuid *cp = data;
2754 struct mgmt_pending_cmd *cmd;
2755 struct bt_uuid *uuid;
2756 int err;
2757
2758 bt_dev_dbg(hdev, "sock %p", sk);
2759
2760 hci_dev_lock(hdev);
2761
2762 if (pending_eir_or_class(hdev)) {
2763 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2764 MGMT_STATUS_BUSY);
2765 goto failed;
2766 }
2767
2768 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2769 if (!uuid) {
2770 err = -ENOMEM;
2771 goto failed;
2772 }
2773
2774 memcpy(uuid->uuid, cp->uuid, 16);
2775 uuid->svc_hint = cp->svc_hint;
2776 uuid->size = get_uuid_size(cp->uuid);
2777
2778 list_add_tail(&uuid->list, &hdev->uuids);
2779
2780 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2781 if (!cmd) {
2782 err = -ENOMEM;
2783 goto failed;
2784 }
2785
2786 /* MGMT_OP_ADD_UUID doesn't require the adapter to be up/running, so
2787 * use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2788 */
2789 err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
2790 mgmt_class_complete);
2791 if (err < 0) {
2792 mgmt_pending_free(cmd);
2793 goto failed;
2794 }
2795
2796 failed:
2797 hci_dev_unlock(hdev);
2798 return err;
2799 }
2800
2801 static bool enable_service_cache(struct hci_dev *hdev)
2802 {
2803 if (!hdev_is_powered(hdev))
2804 return false;
2805
2806 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2807 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2808 CACHE_TIMEOUT);
2809 return true;
2810 }
2811
2812 return false;
2813 }
2814
2815 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2816 {
2817 int err;
2818
2819 err = hci_update_class_sync(hdev);
2820 if (err)
2821 return err;
2822
2823 return hci_update_eir_sync(hdev);
2824 }
2825
2826 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2827 u16 len)
2828 {
2829 struct mgmt_cp_remove_uuid *cp = data;
2830 struct mgmt_pending_cmd *cmd;
2831 struct bt_uuid *match, *tmp;
2832 static const u8 bt_uuid_any[] = {
2833 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2834 };
2835 int err, found;
2836
2837 bt_dev_dbg(hdev, "sock %p", sk);
2838
2839 hci_dev_lock(hdev);
2840
2841 if (pending_eir_or_class(hdev)) {
2842 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2843 MGMT_STATUS_BUSY);
2844 goto unlock;
2845 }
2846
2847 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2848 hci_uuids_clear(hdev);
2849
2850 if (enable_service_cache(hdev)) {
2851 err = mgmt_cmd_complete(sk, hdev->id,
2852 MGMT_OP_REMOVE_UUID,
2853 0, hdev->dev_class, 3);
2854 goto unlock;
2855 }
2856
2857 goto update_class;
2858 }
2859
2860 found = 0;
2861
2862 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2863 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2864 continue;
2865
2866 list_del(&match->list);
2867 kfree(match);
2868 found++;
2869 }
2870
2871 if (found == 0) {
2872 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2873 MGMT_STATUS_INVALID_PARAMS);
2874 goto unlock;
2875 }
2876
2877 update_class:
2878 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2879 if (!cmd) {
2880 err = -ENOMEM;
2881 goto unlock;
2882 }
2883
2884 /* MGMT_OP_REMOVE_UUID doesn't require the adapter to be up/running, so
2885 * use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2886 */
2887 err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
2888 mgmt_class_complete);
2889 if (err < 0)
2890 mgmt_pending_free(cmd);
2891
2892 unlock:
2893 hci_dev_unlock(hdev);
2894 return err;
2895 }
2896
2897 static int set_class_sync(struct hci_dev *hdev, void *data)
2898 {
2899 int err = 0;
2900
2901 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2902 cancel_delayed_work_sync(&hdev->service_cache);
2903 err = hci_update_eir_sync(hdev);
2904 }
2905
2906 if (err)
2907 return err;
2908
2909 return hci_update_class_sync(hdev);
2910 }
2911
2912 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2913 u16 len)
2914 {
2915 struct mgmt_cp_set_dev_class *cp = data;
2916 struct mgmt_pending_cmd *cmd;
2917 int err;
2918
2919 bt_dev_dbg(hdev, "sock %p", sk);
2920
2921 if (!lmp_bredr_capable(hdev))
2922 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2923 MGMT_STATUS_NOT_SUPPORTED);
2924
2925 hci_dev_lock(hdev);
2926
2927 if (pending_eir_or_class(hdev)) {
2928 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2929 MGMT_STATUS_BUSY);
2930 goto unlock;
2931 }
2932
2933 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2934 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2935 MGMT_STATUS_INVALID_PARAMS);
2936 goto unlock;
2937 }
2938
2939 hdev->major_class = cp->major;
2940 hdev->minor_class = cp->minor;
2941
2942 if (!hdev_is_powered(hdev)) {
2943 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2944 hdev->dev_class, 3);
2945 goto unlock;
2946 }
2947
2948 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2949 if (!cmd) {
2950 err = -ENOMEM;
2951 goto unlock;
2952 }
2953
2954 /* MGMT_OP_SET_DEV_CLASS doesn't require the adapter to be up/running, so
2955 * use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2956 */
2957 err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
2958 mgmt_class_complete);
2959 if (err < 0)
2960 mgmt_pending_free(cmd);
2961
2962 unlock:
2963 hci_dev_unlock(hdev);
2964 return err;
2965 }
2966
2967 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2968 u16 len)
2969 {
2970 struct mgmt_cp_load_link_keys *cp = data;
2971 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2972 sizeof(struct mgmt_link_key_info));
2973 u16 key_count, expected_len;
2974 bool changed;
2975 int i;
2976
2977 bt_dev_dbg(hdev, "sock %p", sk);
2978
2979 if (!lmp_bredr_capable(hdev))
2980 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2981 MGMT_STATUS_NOT_SUPPORTED);
2982
2983 key_count = __le16_to_cpu(cp->key_count);
2984 if (key_count > max_key_count) {
2985 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2986 key_count);
2987 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2988 MGMT_STATUS_INVALID_PARAMS);
2989 }
2990
2991 expected_len = struct_size(cp, keys, key_count);
2992 if (expected_len != len) {
2993 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2994 expected_len, len);
2995 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2996 MGMT_STATUS_INVALID_PARAMS);
2997 }
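/* For illustration: struct_size(cp, keys, key_count) expands to
 * sizeof(*cp) + key_count * sizeof(struct mgmt_link_key_info), so a
 * request carrying two keys must be exactly that many bytes long or it
 * is rejected above.
 */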
2998
2999 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
3000 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
3001 MGMT_STATUS_INVALID_PARAMS);
3002
3003 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
3004 key_count);
3005
3006 hci_dev_lock(hdev);
3007
3008 hci_link_keys_clear(hdev);
3009
3010 if (cp->debug_keys)
3011 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
3012 else
3013 changed = hci_dev_test_and_clear_flag(hdev,
3014 HCI_KEEP_DEBUG_KEYS);
3015
3016 if (changed)
3017 new_settings(hdev, NULL);
3018
3019 for (i = 0; i < key_count; i++) {
3020 struct mgmt_link_key_info *key = &cp->keys[i];
3021
3022 if (hci_is_blocked_key(hdev,
3023 HCI_BLOCKED_KEY_TYPE_LINKKEY,
3024 key->val)) {
3025 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
3026 &key->addr.bdaddr);
3027 continue;
3028 }
3029
3030 if (key->addr.type != BDADDR_BREDR) {
3031 bt_dev_warn(hdev,
3032 "Invalid link address type %u for %pMR",
3033 key->addr.type, &key->addr.bdaddr);
3034 continue;
3035 }
3036
3037 if (key->type > 0x08) {
3038 bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
3039 key->type, &key->addr.bdaddr);
3040 continue;
3041 }
3042
3043 /* Always ignore debug keys and require a new pairing if
3044 * the user wants to use them.
3045 */
3046 if (key->type == HCI_LK_DEBUG_COMBINATION)
3047 continue;
3048
3049 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
3050 key->type, key->pin_len, NULL);
3051 }
3052
3053 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
3054
3055 hci_dev_unlock(hdev);
3056
3057 return 0;
3058 }
3059
3060 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
3061 u8 addr_type, struct sock *skip_sk)
3062 {
3063 struct mgmt_ev_device_unpaired ev;
3064
3065 bacpy(&ev.addr.bdaddr, bdaddr);
3066 ev.addr.type = addr_type;
3067
3068 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
3069 skip_sk);
3070 }
3071
3072 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
3073 {
3074 struct mgmt_pending_cmd *cmd = data;
3075 struct mgmt_cp_unpair_device *cp = cmd->param;
3076
3077 if (!err)
3078 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3079
3080 cmd->cmd_complete(cmd, err);
3081 mgmt_pending_free(cmd);
3082 }
3083
3084 static int unpair_device_sync(struct hci_dev *hdev, void *data)
3085 {
3086 struct mgmt_pending_cmd *cmd = data;
3087 struct mgmt_cp_unpair_device *cp = cmd->param;
3088 struct hci_conn *conn;
3089
3090 if (cp->addr.type == BDADDR_BREDR)
3091 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3092 &cp->addr.bdaddr);
3093 else
3094 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3095 le_addr_type(cp->addr.type));
3096
3097 if (!conn)
3098 return 0;
3099
3100 /* Disregard any possible error since the likes of hci_abort_conn_sync
3101 * will clean up the connection no matter the error.
3102 */
3103 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3104
3105 return 0;
3106 }
3107
3108 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3109 u16 len)
3110 {
3111 struct mgmt_cp_unpair_device *cp = data;
3112 struct mgmt_rp_unpair_device rp;
3113 struct hci_conn_params *params;
3114 struct mgmt_pending_cmd *cmd;
3115 struct hci_conn *conn;
3116 u8 addr_type;
3117 int err;
3118
3119 memset(&rp, 0, sizeof(rp));
3120 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3121 rp.addr.type = cp->addr.type;
3122
3123 if (!bdaddr_type_is_valid(cp->addr.type))
3124 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3125 MGMT_STATUS_INVALID_PARAMS,
3126 &rp, sizeof(rp));
3127
3128 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
3129 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3130 MGMT_STATUS_INVALID_PARAMS,
3131 &rp, sizeof(rp));
3132
3133 hci_dev_lock(hdev);
3134
3135 if (!hdev_is_powered(hdev)) {
3136 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3137 MGMT_STATUS_NOT_POWERED, &rp,
3138 sizeof(rp));
3139 goto unlock;
3140 }
3141
3142 if (cp->addr.type == BDADDR_BREDR) {
3143 /* If disconnection is requested, then look up the
3144 * connection. If the remote device is connected, it
3145 * will later be used to terminate the link.
3146 *
3147 * Setting it to NULL explicitly means the link will
3148 * not be terminated.
3149 */
3150 if (cp->disconnect)
3151 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3152 &cp->addr.bdaddr);
3153 else
3154 conn = NULL;
3155
3156 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3157 if (err < 0) {
3158 err = mgmt_cmd_complete(sk, hdev->id,
3159 MGMT_OP_UNPAIR_DEVICE,
3160 MGMT_STATUS_NOT_PAIRED, &rp,
3161 sizeof(rp));
3162 goto unlock;
3163 }
3164
3165 goto done;
3166 }
3167
3168 /* LE address type */
3169 addr_type = le_addr_type(cp->addr.type);
3170
3171 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3172 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3173 if (err < 0) {
3174 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3175 MGMT_STATUS_NOT_PAIRED, &rp,
3176 sizeof(rp));
3177 goto unlock;
3178 }
3179
3180 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3181 if (!conn) {
3182 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3183 goto done;
3184 }
3185
3186
3187 /* Defer clearing up the connection parameters until closing, to
3188 * give a chance of keeping them if a re-pairing happens.
3189 */
3190 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3191
3192 /* Disable auto-connection parameters if present */
3193 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3194 if (params) {
3195 if (params->explicit_connect)
3196 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3197 else
3198 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3199 }
3200
3201 /* If disconnection is not requested, then clear the connection
3202 * variable so that the link is not terminated.
3203 */
3204 if (!cp->disconnect)
3205 conn = NULL;
3206
3207 done:
3208 /* If the connection variable is set, then termination of the
3209 * link is requested.
3210 */
3211 if (!conn) {
3212 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3213 &rp, sizeof(rp));
3214 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3215 goto unlock;
3216 }
3217
3218 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3219 sizeof(*cp));
3220 if (!cmd) {
3221 err = -ENOMEM;
3222 goto unlock;
3223 }
3224
3225 cmd->cmd_complete = addr_cmd_complete;
3226
3227 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3228 unpair_device_complete);
3229 if (err < 0)
3230 mgmt_pending_free(cmd);
3231
3232 unlock:
3233 hci_dev_unlock(hdev);
3234 return err;
3235 }
3236
3237 static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
3238 {
3239 struct mgmt_pending_cmd *cmd = data;
3240
3241 cmd->cmd_complete(cmd, mgmt_status(err));
3242 mgmt_pending_free(cmd);
3243 }
3244
3245 static int disconnect_sync(struct hci_dev *hdev, void *data)
3246 {
3247 struct mgmt_pending_cmd *cmd = data;
3248 struct mgmt_cp_disconnect *cp = cmd->param;
3249 struct hci_conn *conn;
3250
3251 if (cp->addr.type == BDADDR_BREDR)
3252 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3253 &cp->addr.bdaddr);
3254 else
3255 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3256 le_addr_type(cp->addr.type));
3257
3258 if (!conn)
3259 return -ENOTCONN;
3260
3261 /* Disregard any possible error since the likes of hci_abort_conn_sync
3262 * will clean up the connection no matter the error.
3263 */
3264 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3265
3266 return 0;
3267 }
3268
3269 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3270 u16 len)
3271 {
3272 struct mgmt_cp_disconnect *cp = data;
3273 struct mgmt_rp_disconnect rp;
3274 struct mgmt_pending_cmd *cmd;
3275 int err;
3276
3277 bt_dev_dbg(hdev, "sock %p", sk);
3278
3279 memset(&rp, 0, sizeof(rp));
3280 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3281 rp.addr.type = cp->addr.type;
3282
3283 if (!bdaddr_type_is_valid(cp->addr.type))
3284 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3285 MGMT_STATUS_INVALID_PARAMS,
3286 &rp, sizeof(rp));
3287
3288 hci_dev_lock(hdev);
3289
3290 if (!test_bit(HCI_UP, &hdev->flags)) {
3291 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3292 MGMT_STATUS_NOT_POWERED, &rp,
3293 sizeof(rp));
3294 goto failed;
3295 }
3296
3297 cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3298 if (!cmd) {
3299 err = -ENOMEM;
3300 goto failed;
3301 }
3302
3303 cmd->cmd_complete = generic_cmd_complete;
3304
3305 err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
3306 disconnect_complete);
3307 if (err < 0)
3308 mgmt_pending_free(cmd);
3309
3310 failed:
3311 hci_dev_unlock(hdev);
3312 return err;
3313 }
3314
3315 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3316 {
3317 switch (link_type) {
3318 case CIS_LINK:
3319 case BIS_LINK:
3320 case PA_LINK:
3321 case LE_LINK:
3322 switch (addr_type) {
3323 case ADDR_LE_DEV_PUBLIC:
3324 return BDADDR_LE_PUBLIC;
3325
3326 default:
3327 /* Fallback to LE Random address type */
3328 return BDADDR_LE_RANDOM;
3329 }
3330
3331 default:
3332 /* Fallback to BR/EDR type */
3333 return BDADDR_BREDR;
3334 }
3335 }
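/* For illustration: an LE_LINK to a peer using ADDR_LE_DEV_PUBLIC maps
 * to BDADDR_LE_PUBLIC, any other LE address type falls back to
 * BDADDR_LE_RANDOM, and non-LE link types such as ACL_LINK are reported
 * as BDADDR_BREDR.
 */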
3336
3337 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3338 u16 data_len)
3339 {
3340 struct mgmt_rp_get_connections *rp;
3341 struct hci_conn *c;
3342 int err;
3343 u16 i;
3344
3345 bt_dev_dbg(hdev, "sock %p", sk);
3346
3347 hci_dev_lock(hdev);
3348
3349 if (!hdev_is_powered(hdev)) {
3350 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3351 MGMT_STATUS_NOT_POWERED);
3352 goto unlock;
3353 }
3354
3355 i = 0;
3356 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3357 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3358 i++;
3359 }
3360
3361 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3362 if (!rp) {
3363 err = -ENOMEM;
3364 goto unlock;
3365 }
3366
3367 i = 0;
3368 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3369 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3370 continue;
3371 bacpy(&rp->addr[i].bdaddr, &c->dst);
3372 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3373 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3374 continue;
3375 i++;
3376 }
3377
3378 rp->conn_count = cpu_to_le16(i);
3379
3380 /* Recalculate length in case of filtered SCO connections, etc */
3381 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3382 struct_size(rp, addr, i));
3383
3384 kfree(rp);
3385
3386 unlock:
3387 hci_dev_unlock(hdev);
3388 return err;
3389 }
3390
3391 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3392 struct mgmt_cp_pin_code_neg_reply *cp)
3393 {
3394 struct mgmt_pending_cmd *cmd;
3395 int err;
3396
3397 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3398 sizeof(*cp));
3399 if (!cmd)
3400 return -ENOMEM;
3401
3402 cmd->cmd_complete = addr_cmd_complete;
3403
3404 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3405 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3406 if (err < 0)
3407 mgmt_pending_remove(cmd);
3408
3409 return err;
3410 }
3411
3412 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3413 u16 len)
3414 {
3415 struct hci_conn *conn;
3416 struct mgmt_cp_pin_code_reply *cp = data;
3417 struct hci_cp_pin_code_reply reply;
3418 struct mgmt_pending_cmd *cmd;
3419 int err;
3420
3421 bt_dev_dbg(hdev, "sock %p", sk);
3422
3423 hci_dev_lock(hdev);
3424
3425 if (!hdev_is_powered(hdev)) {
3426 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3427 MGMT_STATUS_NOT_POWERED);
3428 goto failed;
3429 }
3430
3431 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3432 if (!conn) {
3433 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3434 MGMT_STATUS_NOT_CONNECTED);
3435 goto failed;
3436 }
3437
3438 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3439 struct mgmt_cp_pin_code_neg_reply ncp;
3440
3441 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3442
3443 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3444
3445 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3446 if (err >= 0)
3447 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3448 MGMT_STATUS_INVALID_PARAMS);
3449
3450 goto failed;
3451 }
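/* A full 16-digit PIN is required for BT_SECURITY_HIGH with legacy
 * pairing (an assumption reflected in the check above); shorter PINs
 * are answered with a negative reply.
 */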
3452
3453 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3454 if (!cmd) {
3455 err = -ENOMEM;
3456 goto failed;
3457 }
3458
3459 cmd->cmd_complete = addr_cmd_complete;
3460
3461 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3462 reply.pin_len = cp->pin_len;
3463 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3464
3465 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3466 if (err < 0)
3467 mgmt_pending_remove(cmd);
3468
3469 failed:
3470 hci_dev_unlock(hdev);
3471 return err;
3472 }
3473
3474 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3475 u16 len)
3476 {
3477 struct mgmt_cp_set_io_capability *cp = data;
3478
3479 bt_dev_dbg(hdev, "sock %p", sk);
3480
3481 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3482 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3483 MGMT_STATUS_INVALID_PARAMS);
3484
3485 hci_dev_lock(hdev);
3486
3487 hdev->io_capability = cp->io_capability;
3488
3489 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3490
3491 hci_dev_unlock(hdev);
3492
3493 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3494 NULL, 0);
3495 }
3496
3497 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3498 {
3499 struct hci_dev *hdev = conn->hdev;
3500 struct mgmt_pending_cmd *cmd;
3501
3502 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3503 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3504 continue;
3505
3506 if (cmd->user_data != conn)
3507 continue;
3508
3509 return cmd;
3510 }
3511
3512 return NULL;
3513 }
3514
3515 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3516 {
3517 struct mgmt_rp_pair_device rp;
3518 struct hci_conn *conn = cmd->user_data;
3519 int err;
3520
3521 bacpy(&rp.addr.bdaddr, &conn->dst);
3522 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3523
3524 err = mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_PAIR_DEVICE,
3525 status, &rp, sizeof(rp));
3526
3527 /* So we don't get further callbacks for this connection */
3528 conn->connect_cfm_cb = NULL;
3529 conn->security_cfm_cb = NULL;
3530 conn->disconn_cfm_cb = NULL;
3531
3532 hci_conn_drop(conn);
3533
3534 /* The device is paired so there is no need to remove
3535 * its connection parameters anymore.
3536 */
3537 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3538
3539 hci_conn_put(conn);
3540
3541 return err;
3542 }
3543
3544 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3545 {
3546 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3547 struct mgmt_pending_cmd *cmd;
3548
3549 cmd = find_pairing(conn);
3550 if (cmd) {
3551 cmd->cmd_complete(cmd, status);
3552 mgmt_pending_remove(cmd);
3553 }
3554 }
3555
3556 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3557 {
3558 struct mgmt_pending_cmd *cmd;
3559
3560 BT_DBG("status %u", status);
3561
3562 cmd = find_pairing(conn);
3563 if (!cmd) {
3564 BT_DBG("Unable to find a pending command");
3565 return;
3566 }
3567
3568 cmd->cmd_complete(cmd, mgmt_status(status));
3569 mgmt_pending_remove(cmd);
3570 }
3571
3572 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3573 {
3574 struct mgmt_pending_cmd *cmd;
3575
3576 BT_DBG("status %u", status);
3577
3578 if (!status)
3579 return;
3580
3581 cmd = find_pairing(conn);
3582 if (!cmd) {
3583 BT_DBG("Unable to find a pending command");
3584 return;
3585 }
3586
3587 cmd->cmd_complete(cmd, mgmt_status(status));
3588 mgmt_pending_remove(cmd);
3589 }
3590
3591 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3592 u16 len)
3593 {
3594 struct mgmt_cp_pair_device *cp = data;
3595 struct mgmt_rp_pair_device rp;
3596 struct mgmt_pending_cmd *cmd;
3597 u8 sec_level, auth_type;
3598 struct hci_conn *conn;
3599 int err;
3600
3601 bt_dev_dbg(hdev, "sock %p", sk);
3602
3603 memset(&rp, 0, sizeof(rp));
3604 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3605 rp.addr.type = cp->addr.type;
3606
3607 if (!bdaddr_type_is_valid(cp->addr.type))
3608 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3609 MGMT_STATUS_INVALID_PARAMS,
3610 &rp, sizeof(rp));
3611
3612 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3613 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3614 MGMT_STATUS_INVALID_PARAMS,
3615 &rp, sizeof(rp));
3616
3617 hci_dev_lock(hdev);
3618
3619 if (!hdev_is_powered(hdev)) {
3620 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3621 MGMT_STATUS_NOT_POWERED, &rp,
3622 sizeof(rp));
3623 goto unlock;
3624 }
3625
3626 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3627 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3628 MGMT_STATUS_ALREADY_PAIRED, &rp,
3629 sizeof(rp));
3630 goto unlock;
3631 }
3632
3633 sec_level = BT_SECURITY_MEDIUM;
3634 auth_type = HCI_AT_DEDICATED_BONDING;
3635
3636 if (cp->addr.type == BDADDR_BREDR) {
3637 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3638 auth_type, CONN_REASON_PAIR_DEVICE,
3639 HCI_ACL_CONN_TIMEOUT);
3640 } else {
3641 u8 addr_type = le_addr_type(cp->addr.type);
3642 struct hci_conn_params *p;
3643
3644 /* When pairing a new device, it is expected to remember
3645 * this device for future connections. Adding the connection
3646 * parameter information ahead of time allows tracking
3647 * of the peripheral preferred values and will speed up any
3648 * further connection establishment.
3649 *
3650 * If connection parameters already exist, then they
3651 * will be kept and this function does nothing.
3652 */
3653 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3654 if (!p) {
3655 err = -EIO;
3656 goto unlock;
3657 }
3658
3659 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3660 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3661
3662 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3663 sec_level, HCI_LE_CONN_TIMEOUT,
3664 CONN_REASON_PAIR_DEVICE);
3665 }
3666
3667 if (IS_ERR(conn)) {
3668 int status;
3669
3670 if (PTR_ERR(conn) == -EBUSY)
3671 status = MGMT_STATUS_BUSY;
3672 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3673 status = MGMT_STATUS_NOT_SUPPORTED;
3674 else if (PTR_ERR(conn) == -ECONNREFUSED)
3675 status = MGMT_STATUS_REJECTED;
3676 else
3677 status = MGMT_STATUS_CONNECT_FAILED;
3678
3679 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3680 status, &rp, sizeof(rp));
3681 goto unlock;
3682 }
3683
3684 if (conn->connect_cfm_cb) {
3685 hci_conn_drop(conn);
3686 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3687 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3688 goto unlock;
3689 }
3690
3691 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3692 if (!cmd) {
3693 err = -ENOMEM;
3694 hci_conn_drop(conn);
3695 goto unlock;
3696 }
3697
3698 cmd->cmd_complete = pairing_complete;
3699
3700 /* For LE, just connecting isn't proof that the pairing finished */
3701 if (cp->addr.type == BDADDR_BREDR) {
3702 conn->connect_cfm_cb = pairing_complete_cb;
3703 conn->security_cfm_cb = pairing_complete_cb;
3704 conn->disconn_cfm_cb = pairing_complete_cb;
3705 } else {
3706 conn->connect_cfm_cb = le_pairing_complete_cb;
3707 conn->security_cfm_cb = le_pairing_complete_cb;
3708 conn->disconn_cfm_cb = le_pairing_complete_cb;
3709 }
3710
3711 conn->io_capability = cp->io_cap;
3712 cmd->user_data = hci_conn_get(conn);
3713
3714 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3715 hci_conn_security(conn, sec_level, auth_type, true)) {
3716 cmd->cmd_complete(cmd, 0);
3717 mgmt_pending_remove(cmd);
3718 }
3719
3720 err = 0;
3721
3722 unlock:
3723 hci_dev_unlock(hdev);
3724 return err;
3725 }
3726
3727 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3728 u16 len)
3729 {
3730 struct mgmt_addr_info *addr = data;
3731 struct mgmt_pending_cmd *cmd;
3732 struct hci_conn *conn;
3733 int err;
3734
3735 bt_dev_dbg(hdev, "sock %p", sk);
3736
3737 hci_dev_lock(hdev);
3738
3739 if (!hdev_is_powered(hdev)) {
3740 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3741 MGMT_STATUS_NOT_POWERED);
3742 goto unlock;
3743 }
3744
3745 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3746 if (!cmd) {
3747 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3748 MGMT_STATUS_INVALID_PARAMS);
3749 goto unlock;
3750 }
3751
3752 conn = cmd->user_data;
3753
3754 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3755 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3756 MGMT_STATUS_INVALID_PARAMS);
3757 goto unlock;
3758 }
3759
3760 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3761 mgmt_pending_remove(cmd);
3762
3763 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3764 addr, sizeof(*addr));
3765
3766 /* Since the user doesn't want to proceed with the connection, abort any
3767 * ongoing pairing and then terminate the link if it was created
3768 * because of the pair device action.
3769 */
3770 if (addr->type == BDADDR_BREDR)
3771 hci_remove_link_key(hdev, &addr->bdaddr);
3772 else
3773 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3774 le_addr_type(addr->type));
3775
3776 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3777 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3778
3779 unlock:
3780 hci_dev_unlock(hdev);
3781 return err;
3782 }
3783
3784 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3785 struct mgmt_addr_info *addr, u16 mgmt_op,
3786 u16 hci_op, __le32 passkey)
3787 {
3788 struct mgmt_pending_cmd *cmd;
3789 struct hci_conn *conn;
3790 int err;
3791
3792 hci_dev_lock(hdev);
3793
3794 if (!hdev_is_powered(hdev)) {
3795 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3796 MGMT_STATUS_NOT_POWERED, addr,
3797 sizeof(*addr));
3798 goto done;
3799 }
3800
3801 if (addr->type == BDADDR_BREDR)
3802 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3803 else
3804 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3805 le_addr_type(addr->type));
3806
3807 if (!conn) {
3808 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3809 MGMT_STATUS_NOT_CONNECTED, addr,
3810 sizeof(*addr));
3811 goto done;
3812 }
3813
3814 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3815 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3816 if (!err)
3817 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3818 MGMT_STATUS_SUCCESS, addr,
3819 sizeof(*addr));
3820 else
3821 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3822 MGMT_STATUS_FAILED, addr,
3823 sizeof(*addr));
3824
3825 goto done;
3826 }
3827
3828 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3829 if (!cmd) {
3830 err = -ENOMEM;
3831 goto done;
3832 }
3833
3834 cmd->cmd_complete = addr_cmd_complete;
3835
3836 /* Continue with pairing via HCI */
3837 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3838 struct hci_cp_user_passkey_reply cp;
3839
3840 bacpy(&cp.bdaddr, &addr->bdaddr);
3841 cp.passkey = passkey;
3842 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3843 } else
3844 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3845 &addr->bdaddr);
3846
3847 if (err < 0)
3848 mgmt_pending_remove(cmd);
3849
3850 done:
3851 hci_dev_unlock(hdev);
3852 return err;
3853 }

static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}

static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}

static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}

static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}

static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}

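/* When data that is mirrored into advertising instances (such as the
 * local name or appearance) changes, the instance currently on air is
 * stale. The helper below cancels the instance timeout and schedules
 * the next instance, which regenerates the advertising data with the
 * updated values.
 */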
static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return 0;

	/* Stop if the current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return 0;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return 0;

	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);

	return 0;
}

static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}

static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_free(cmd);
}

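/* The pending command may be freed from another context once
 * hdev->mgmt_pending_lock is dropped, so the name parameters are
 * copied to the stack under the lock before any HCI commands are
 * issued.
 */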
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name cp;

	mutex_lock(&hdev->mgmt_pending_lock);

	if (!__mgmt_pending_listed(hdev, cmd)) {
		mutex_unlock(&hdev->mgmt_pending_lock);
		return -ECANCELED;
	}

	memcpy(&cp, cmd->param, sizeof(cp));

	mutex_unlock(&hdev->mgmt_pending_lock);

	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev, cp.name);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data, so there is no need
	 * to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}

static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones, just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}

static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != appearance) {
		hdev->appearance = appearance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}

static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_configuration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}

int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}

static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb;
	u8 status = mgmt_status(err);

	skb = cmd->skb;

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}

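/* Translate the selected PHY bitmask into an HCI LE Set Default PHY
 * command. Per the Core specification, all_phys bit 0 means the host
 * has no transmitter PHY preference and bit 1 means no receiver PHY
 * preference; the tx_phys/rx_phys masks are only meaningful when the
 * respective all_phys bit is clear. As an illustrative example,
 * selecting MGMT_PHY_LE_1M_TX, MGMT_PHY_LE_1M_RX and MGMT_PHY_LE_2M_RX
 * yields all_phys 0x00, tx_phys 0x01 and rx_phys 0x03.
 */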
static int set_default_phy_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
	struct hci_cp_le_set_default_phy cp_phy;
	u32 selected_phys;

	selected_phys = __le32_to_cpu(cp->selected_phys);

	memset(&cp_phy, 0, sizeof(cp_phy));

	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
				  sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);

	return 0;
}

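/* For BR/EDR the PHY selection is expressed through the ACL packet
 * type below. Note the inverted logic for the EDR bits: per the HCI
 * specification, the 2-DH and 3-DH bits in the packet type field mean
 * "shall not be used", so an EDR rate is enabled by clearing its bit
 * and disabled by setting it, while the basic rate DM/DH bits work
 * the usual way.
 */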
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

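/* The Set Blocked Keys command carries a 16-bit key count followed by
 * an array of mgmt_blocked_key_info entries (key type plus key value),
 * so the expected length is validated with struct_size(). For
 * instance, a request blocking a single key must be exactly
 * struct_size(keys, keys, 1) bytes long.
 */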
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				 err, NULL, 0);
}

static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_test_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

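/* The controller capabilities are returned as a sequence of EIR-style
 * fields, each a length byte followed by a type byte and the value.
 * The 20 byte stack buffer covers the worst case built below: the
 * security flags, two encryption key size fields and the LE TX power
 * range.
 */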
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, remote
	 * public key validation is supported as well.
	 *
	 * Alternatively, when the Microsoft extensions are available, they
	 * can indicate support for public key validation.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, the
	 * maximum encryption key size information is also provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * them from the controller.
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}

#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};

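/* Each experimental feature entry in the reply is a 16 byte UUID
 * followed by a 32-bit little-endian flags word (bit 0 = enabled),
 * i.e. 20 bytes per entry, which is why the reply length is computed
 * as sizeof(*rp) + (20 * idx) below.
 */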
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_exp_features_info *rp;
	size_t len;
	u16 idx = 0;
	u32 flags;
	int status;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enough space for 7 features */
	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
	rp = kzalloc(len, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

#ifdef CONFIG_BT_FEATURE_DEBUG
	flags = bt_dbg_get() ? BIT(0) : 0;

	memcpy(rp->features[idx].uuid, debug_uuid, 16);
	rp->features[idx].flags = cpu_to_le32(flags);
	idx++;
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (IS_ENABLED(CONFIG_BT_LE)) {
		flags = iso_inited() ? BIT(0) : 0;
		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && lmp_le_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable the
	 * events to update the client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				   MGMT_OP_READ_EXP_FEATURES_INFO,
				   0, rp, sizeof(*rp) + (20 * idx));

	kfree(rp);
	return status;
}

static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
			       bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, uuid, 16);
	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}

#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}

/* The zero key UUID is special. Multiple experimental features are set
 * through it.
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}

#ifdef CONFIG_BT_FEATURE_DEBUG
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* This command requires the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif

static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* This command requires a controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_MESH_EXPERIMENTAL);
	} else {
		hci_dev_clear_flag(hdev, HCI_MESH);
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_MESH_EXPERIMENTAL);
	}

	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);

	return err;
}

static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* This command requires a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}

static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* This command requires a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	if (!hdev->get_data_path_id) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);

	return err;
}

static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* This command requires a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));

	if (!hci_dev_le_state_simultaneous(hdev)) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);

	return err;
}

#ifdef CONFIG_BT_LE
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed = false;
	int err;

	/* This command requires the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	if (val)
		err = iso_init();
	else
		err = iso_exit();

	if (!err)
		changed = true;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, val, sk);

	return err;
}
#endif

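/* Table-driven dispatch for Set Experimental Feature: the UUID in the
 * request is matched against the entries below and the corresponding
 * setter is invoked, so supporting a new feature only requires adding
 * a UUID/handler pair here.
 */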
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* End with a null feature */
	EXP_FEAT(NULL, NULL)
};

static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	size_t i = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	for (i = 0; exp_features[i].uuid; i++) {
		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
			return exp_features[i].set_func(sk, hdev, cp, data_len);
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}

static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	supported_flags = hdev->conn_flags;

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (!params)
			goto done;

		current_flags = params->flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				 &rp, sizeof(rp));
}

static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = bdaddr_type;
	ev.supported_flags = cpu_to_le32(supported_flags);
	ev.current_flags = cpu_to_le32(current_flags);

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}

static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	if (!conn)
		return false;

	if (conn->dst_type != type)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}

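/* Depending on the requested auto_connect mode, the connection
 * parameters are moved to the list that the passive scanning code acts
 * on: pend_le_conns for modes that should (re)establish a connection,
 * pend_le_reports for report-only mode, or neither when auto
 * connection is disabled. An explicit connect attempt that is already
 * in progress stays on pend_le_conns regardless.
 */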
/* This function requires the caller to hold hdev->lock */
static struct hci_conn_params *hci_conn_params_set(struct hci_dev *hdev,
						   bdaddr_t *addr, u8 addr_type,
						   u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return NULL;

	if (params->auto_connect == auto_connect)
		return params;

	hci_pend_le_list_del_init(params);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to a device, keep connecting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		else
			hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return params;
}

static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* hdev->conn_flags is read here without holding hdev->lock and can
	 * change underneath us, so it is checked again under the lock below
	 * before the flags are applied.
	 */
	supported_flags = hdev->conn_flags;

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		/* Create a new hci_conn_params if it doesn't exist */
		params = hci_conn_params_set(hdev, &cp->addr.bdaddr,
					     le_addr_type(cp->addr.type),
					     HCI_AUTO_CONN_DISABLED);
		if (!params) {
			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
				    &cp->addr.bdaddr,
				    le_addr_type(cp->addr.type));
			goto unlock;
		}
	}

	supported_flags = hdev->conn_flags;

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	WRITE_ONCE(params->flags, current_flags);
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}

static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
				   u16 handle)
{
	struct mgmt_ev_adv_monitor_added ev;

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
}

static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
				     __le16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;

	ev.monitor_handle = handle;

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
}

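/* Registered advertisement monitors are tracked in an IDR keyed by
 * their 16-bit handle, so the reply below simply snapshots all
 * currently allocated handles while holding the hdev lock.
 */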
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}

static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor;

	/* This is likely the result of hdev being closed:
	 * mgmt_index_removed is cleaning up any pending command, and
	 * hci_adv_monitors_clear is about to be called, which will take
	 * care of freeing the adv_monitor instances.
	 */
	if (status == -ECANCELED && !mgmt_pending_valid(hdev, cmd))
		return;

	monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}

static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *mon;

	mutex_lock(&hdev->mgmt_pending_lock);

	if (!__mgmt_pending_listed(hdev, cmd)) {
		mutex_unlock(&hdev->mgmt_pending_lock);
		return -ECANCELED;
	}

	mon = cmd->user_data;

	mutex_unlock(&hdev->mgmt_pending_lock);

	return hci_add_adv_monitor(hdev, mon);
}

static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	if (status)
		goto unlock;

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}

static void parse_adv_monitor_rssi(struct adv_monitor *m,
				   struct mgmt_adv_rssi_thresholds *rssi)
{
	if (rssi) {
		m->rssi.low_threshold = rssi->low_threshold;
		m->rssi.low_threshold_timeout =
		    __le16_to_cpu(rssi->low_threshold_timeout);
		m->rssi.high_threshold = rssi->high_threshold;
		m->rssi.high_threshold_timeout =
		    __le16_to_cpu(rssi->high_threshold_timeout);
		m->rssi.sampling_period = rssi->sampling_period;
	} else {
		/* Default values. These are the least constraining
		 * parameters for the MSFT API to work, so it behaves as if
		 * there were no RSSI parameters to consider. They may need
		 * to be changed if other APIs are to be supported.
		 */
		m->rssi.low_threshold = -127;
		m->rssi.low_threshold_timeout = 60;
		m->rssi.high_threshold = -127;
		m->rssi.high_threshold_timeout = 0;
		m->rssi.sampling_period = 0;
	}
}

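/* Each monitor pattern must fit entirely within a single advertising
 * report: offset, length and offset + length are all bounded by
 * HCI_MAX_AD_LENGTH (31 bytes). For example, offset 29 with length 3
 * is rejected while offset 28 with length 3 is still accepted.
 */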
static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
				    struct mgmt_adv_pattern *patterns)
{
	u8 offset = 0, length = 0;
	struct adv_pattern *p = NULL;
	int i;

	for (i = 0; i < pattern_count; i++) {
		offset = patterns[i].offset;
		length = patterns[i].length;
		if (offset >= HCI_MAX_AD_LENGTH ||
		    length > HCI_MAX_AD_LENGTH ||
		    (offset + length) > HCI_MAX_AD_LENGTH)
			return MGMT_STATUS_INVALID_PARAMS;

		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return MGMT_STATUS_NO_RESOURCES;

		p->ad_type = patterns[i].ad_type;
		p->offset = patterns[i].offset;
		p->length = patterns[i].length;
		memcpy(p->value, patterns[i].value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	return MGMT_STATUS_SUCCESS;
}

static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}

static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}

static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp;

	if (status == -ECANCELED)
		return;

	hci_dev_lock(hdev);

	cp = cmd->param;

	rp.monitor_handle = cp->monitor_handle;

	if (!status) {
		mgmt_adv_monitor_removed(cmd->sk, hdev, cp->monitor_handle);
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}

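/* A monitor handle of 0 acts as a wildcard that removes all registered
 * monitors; any other value removes just the matching monitor.
 */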
mgmt_remove_adv_monitor_sync(struct hci_dev * hdev,void * data)5611 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5612 {
5613 struct mgmt_pending_cmd *cmd = data;
5614 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5615 u16 handle = __le16_to_cpu(cp->monitor_handle);
5616
5617 if (!handle)
5618 return hci_remove_all_adv_monitor(hdev);
5619
5620 return hci_remove_single_adv_monitor(hdev, handle);
5621 }
5622
remove_adv_monitor(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)5623 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5624 void *data, u16 len)
5625 {
5626 struct mgmt_pending_cmd *cmd;
5627 int err, status;
5628
5629 hci_dev_lock(hdev);
5630
5631 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5632 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5633 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5634 status = MGMT_STATUS_BUSY;
5635 goto unlock;
5636 }
5637
5638 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5639 if (!cmd) {
5640 status = MGMT_STATUS_NO_RESOURCES;
5641 goto unlock;
5642 }
5643
5644 err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
5645 mgmt_remove_adv_monitor_complete);
5646
5647 if (err) {
5648 mgmt_pending_free(cmd);
5649
5650 if (err == -ENOMEM)
5651 status = MGMT_STATUS_NO_RESOURCES;
5652 else
5653 status = MGMT_STATUS_FAILED;
5654
5655 goto unlock;
5656 }
5657
5658 hci_dev_unlock(hdev);
5659
5660 return 0;
5661
5662 unlock:
5663 hci_dev_unlock(hdev);
5664 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5665 status);
5666 }
5667
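/* Completion callback for read_local_oob_data_sync(). Depending on
 * whether Secure Connections is enabled for BR/EDR, the controller reply
 * carries either the P-192 values only or both the P-192 and P-256
 * values; the management response size is trimmed accordingly.
 */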
5668 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data,
5669 int err)
5670 {
5671 struct mgmt_rp_read_local_oob_data mgmt_rp;
5672 size_t rp_size = sizeof(mgmt_rp);
5673 struct mgmt_pending_cmd *cmd = data;
5674 struct sk_buff *skb = cmd->skb;
5675 u8 status = mgmt_status(err);
5676
5677 if (!status) {
5678 if (!skb)
5679 status = MGMT_STATUS_FAILED;
5680 else if (IS_ERR(skb))
5681 status = mgmt_status(PTR_ERR(skb));
5682 else
5683 status = mgmt_status(skb->data[0]);
5684 }
5685
5686 bt_dev_dbg(hdev, "status %d", status);
5687
5688 if (status) {
5689 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5690 status);
5691 goto remove;
5692 }
5693
5694 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5695
5696 if (!bredr_sc_enabled(hdev)) {
5697 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5698
5699 if (skb->len < sizeof(*rp)) {
5700 mgmt_cmd_status(cmd->sk, hdev->id,
5701 MGMT_OP_READ_LOCAL_OOB_DATA,
5702 MGMT_STATUS_FAILED);
5703 goto remove;
5704 }
5705
5706 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5707 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5708
5709 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5710 } else {
5711 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5712
5713 if (skb->len < sizeof(*rp)) {
5714 mgmt_cmd_status(cmd->sk, hdev->id,
5715 MGMT_OP_READ_LOCAL_OOB_DATA,
5716 MGMT_STATUS_FAILED);
5717 goto remove;
5718 }
5719
5720 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5721 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5722
5723 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5724 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5725 }
5726
5727 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5728 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5729
5730 remove:
5731 if (skb && !IS_ERR(skb))
5732 kfree_skb(skb);
5733
5734 mgmt_pending_free(cmd);
5735 }
5736
5737 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5738 {
5739 struct mgmt_pending_cmd *cmd = data;
5740
5741 if (bredr_sc_enabled(hdev))
5742 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5743 else
5744 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5745
5746 if (IS_ERR(cmd->skb))
5747 return PTR_ERR(cmd->skb);
5748 else
5749 return 0;
5750 }
5751
5752 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5753 void *data, u16 data_len)
5754 {
5755 struct mgmt_pending_cmd *cmd;
5756 int err;
5757
5758 bt_dev_dbg(hdev, "sock %p", sk);
5759
5760 hci_dev_lock(hdev);
5761
5762 if (!hdev_is_powered(hdev)) {
5763 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5764 MGMT_STATUS_NOT_POWERED);
5765 goto unlock;
5766 }
5767
5768 if (!lmp_ssp_capable(hdev)) {
5769 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5770 MGMT_STATUS_NOT_SUPPORTED);
5771 goto unlock;
5772 }
5773
5774 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5775 if (!cmd)
5776 err = -ENOMEM;
5777 else
5778 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5779 read_local_oob_data_complete);
5780
5781 if (err < 0) {
5782 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5783 MGMT_STATUS_FAILED);
5784
5785 if (cmd)
5786 mgmt_pending_free(cmd);
5787 }
5788
5789 unlock:
5790 hci_dev_unlock(hdev);
5791 return err;
5792 }
5793
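/* Handler for MGMT_OP_ADD_REMOTE_OOB_DATA. Accepts either the legacy
 * (P-192 only) or the extended (P-192 + P-256) command layout, using the
 * command length to tell them apart. All-zero hash/randomizer pairs are
 * treated as "no data" for the corresponding curve.
 */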
5794 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5795 void *data, u16 len)
5796 {
5797 struct mgmt_addr_info *addr = data;
5798 int err;
5799
5800 bt_dev_dbg(hdev, "sock %p", sk);
5801
5802 if (!bdaddr_type_is_valid(addr->type))
5803 return mgmt_cmd_complete(sk, hdev->id,
5804 MGMT_OP_ADD_REMOTE_OOB_DATA,
5805 MGMT_STATUS_INVALID_PARAMS,
5806 addr, sizeof(*addr));
5807
5808 hci_dev_lock(hdev);
5809
5810 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5811 struct mgmt_cp_add_remote_oob_data *cp = data;
5812 u8 status;
5813
5814 if (cp->addr.type != BDADDR_BREDR) {
5815 err = mgmt_cmd_complete(sk, hdev->id,
5816 MGMT_OP_ADD_REMOTE_OOB_DATA,
5817 MGMT_STATUS_INVALID_PARAMS,
5818 &cp->addr, sizeof(cp->addr));
5819 goto unlock;
5820 }
5821
5822 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5823 cp->addr.type, cp->hash,
5824 cp->rand, NULL, NULL);
5825 if (err < 0)
5826 status = MGMT_STATUS_FAILED;
5827 else
5828 status = MGMT_STATUS_SUCCESS;
5829
5830 err = mgmt_cmd_complete(sk, hdev->id,
5831 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5832 &cp->addr, sizeof(cp->addr));
5833 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5834 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5835 u8 *rand192, *hash192, *rand256, *hash256;
5836 u8 status;
5837
5838 if (bdaddr_type_is_le(cp->addr.type)) {
5839 /* Enforce zero-valued 192-bit parameters as
5840 * long as legacy SMP OOB isn't implemented.
5841 */
5842 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5843 memcmp(cp->hash192, ZERO_KEY, 16)) {
5844 err = mgmt_cmd_complete(sk, hdev->id,
5845 MGMT_OP_ADD_REMOTE_OOB_DATA,
5846 MGMT_STATUS_INVALID_PARAMS,
5847 addr, sizeof(*addr));
5848 goto unlock;
5849 }
5850
5851 rand192 = NULL;
5852 hash192 = NULL;
5853 } else {
5854 /* If one of the P-192 values is set to zero, just
5855 * disable OOB data for P-192.
5856 */
5857 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5858 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5859 rand192 = NULL;
5860 hash192 = NULL;
5861 } else {
5862 rand192 = cp->rand192;
5863 hash192 = cp->hash192;
5864 }
5865 }
5866
5867 /* If one of the P-256 values is set to zero, just disable
5868 * OOB data for P-256.
5869 */
5870 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5871 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5872 rand256 = NULL;
5873 hash256 = NULL;
5874 } else {
5875 rand256 = cp->rand256;
5876 hash256 = cp->hash256;
5877 }
5878
5879 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5880 cp->addr.type, hash192, rand192,
5881 hash256, rand256);
5882 if (err < 0)
5883 status = MGMT_STATUS_FAILED;
5884 else
5885 status = MGMT_STATUS_SUCCESS;
5886
5887 err = mgmt_cmd_complete(sk, hdev->id,
5888 MGMT_OP_ADD_REMOTE_OOB_DATA,
5889 status, &cp->addr, sizeof(cp->addr));
5890 } else {
5891 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5892 len);
5893 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5894 MGMT_STATUS_INVALID_PARAMS);
5895 }
5896
5897 unlock:
5898 hci_dev_unlock(hdev);
5899 return err;
5900 }
5901
5902 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5903 void *data, u16 len)
5904 {
5905 struct mgmt_cp_remove_remote_oob_data *cp = data;
5906 u8 status;
5907 int err;
5908
5909 bt_dev_dbg(hdev, "sock %p", sk);
5910
5911 if (cp->addr.type != BDADDR_BREDR)
5912 return mgmt_cmd_complete(sk, hdev->id,
5913 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5914 MGMT_STATUS_INVALID_PARAMS,
5915 &cp->addr, sizeof(cp->addr));
5916
5917 hci_dev_lock(hdev);
5918
5919 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5920 hci_remote_oob_data_clear(hdev);
5921 status = MGMT_STATUS_SUCCESS;
5922 goto done;
5923 }
5924
5925 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5926 if (err < 0)
5927 status = MGMT_STATUS_INVALID_PARAMS;
5928 else
5929 status = MGMT_STATUS_SUCCESS;
5930
5931 done:
5932 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5933 status, &cp->addr, sizeof(cp->addr));
5934
5935 hci_dev_unlock(hdev);
5936 return err;
5937 }
5938
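/* Validate a discovery type against the controller capabilities and, on
 * failure, report the appropriate management status through mgmt_status.
 * Interleaved discovery requires both LE and BR/EDR support.
 */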
5939 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5940 uint8_t *mgmt_status)
5941 {
5942 switch (type) {
5943 case DISCOV_TYPE_LE:
5944 *mgmt_status = mgmt_le_support(hdev);
5945 if (*mgmt_status)
5946 return false;
5947 break;
5948 case DISCOV_TYPE_INTERLEAVED:
5949 *mgmt_status = mgmt_le_support(hdev);
5950 if (*mgmt_status)
5951 return false;
5952 fallthrough;
5953 case DISCOV_TYPE_BREDR:
5954 *mgmt_status = mgmt_bredr_support(hdev);
5955 if (*mgmt_status)
5956 return false;
5957 break;
5958 default:
5959 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5960 return false;
5961 }
5962
5963 return true;
5964 }
5965
5966 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5967 {
5968 struct mgmt_pending_cmd *cmd = data;
5969
5970 bt_dev_dbg(hdev, "err %d", err);
5971
5972 if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
5973 return;
5974
5975 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
5976 cmd->param, 1);
5977 mgmt_pending_free(cmd);
5978
5979 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
5980 DISCOVERY_FINDING);
5981 }
5982
5983 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5984 {
5985 if (!mgmt_pending_listed(hdev, data))
5986 return -ECANCELED;
5987
5988 return hci_start_discovery_sync(hdev);
5989 }
5990
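/* Common implementation behind MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY: after the power, state and type
 * checks it resets the discovery filter, queues
 * hci_start_discovery_sync() and moves the discovery state machine to
 * DISCOVERY_STARTING.
 */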
5991 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5992 u16 op, void *data, u16 len)
5993 {
5994 struct mgmt_cp_start_discovery *cp = data;
5995 struct mgmt_pending_cmd *cmd;
5996 u8 status;
5997 int err;
5998
5999 bt_dev_dbg(hdev, "sock %p", sk);
6000
6001 hci_dev_lock(hdev);
6002
6003 if (!hdev_is_powered(hdev)) {
6004 err = mgmt_cmd_complete(sk, hdev->id, op,
6005 MGMT_STATUS_NOT_POWERED,
6006 &cp->type, sizeof(cp->type));
6007 goto failed;
6008 }
6009
6010 if (hdev->discovery.state != DISCOVERY_STOPPED ||
6011 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
6012 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
6013 &cp->type, sizeof(cp->type));
6014 goto failed;
6015 }
6016
6017 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6018 err = mgmt_cmd_complete(sk, hdev->id, op, status,
6019 &cp->type, sizeof(cp->type));
6020 goto failed;
6021 }
6022
6023 /* Can't start discovery when it is paused */
6024 if (hdev->discovery_paused) {
6025 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
6026 &cp->type, sizeof(cp->type));
6027 goto failed;
6028 }
6029
6030 /* Clear the discovery filter first to free any previously
6031 * allocated memory for the UUID list.
6032 */
6033 hci_discovery_filter_clear(hdev);
6034
6035 hdev->discovery.type = cp->type;
6036 hdev->discovery.report_invalid_rssi = false;
6037 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
6038 hdev->discovery.limited = true;
6039 else
6040 hdev->discovery.limited = false;
6041
6042 cmd = mgmt_pending_add(sk, op, hdev, data, len);
6043 if (!cmd) {
6044 err = -ENOMEM;
6045 goto failed;
6046 }
6047
6048 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6049 start_discovery_complete);
6050 if (err < 0) {
6051 mgmt_pending_remove(cmd);
6052 goto failed;
6053 }
6054
6055 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6056
6057 failed:
6058 hci_dev_unlock(hdev);
6059 return err;
6060 }
6061
6062 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
6063 void *data, u16 len)
6064 {
6065 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
6066 data, len);
6067 }
6068
6069 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
6070 void *data, u16 len)
6071 {
6072 return start_discovery_internal(sk, hdev,
6073 MGMT_OP_START_LIMITED_DISCOVERY,
6074 data, len);
6075 }
6076
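/* Handler for MGMT_OP_START_SERVICE_DISCOVERY. In addition to the
 * checks done for regular discovery it validates the variable-length
 * UUID list, copies it into hdev->discovery and enables result
 * filtering.
 */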
6077 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
6078 void *data, u16 len)
6079 {
6080 struct mgmt_cp_start_service_discovery *cp = data;
6081 struct mgmt_pending_cmd *cmd;
6082 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
6083 u16 uuid_count, expected_len;
6084 u8 status;
6085 int err;
6086
6087 bt_dev_dbg(hdev, "sock %p", sk);
6088
6089 hci_dev_lock(hdev);
6090
6091 if (!hdev_is_powered(hdev)) {
6092 err = mgmt_cmd_complete(sk, hdev->id,
6093 MGMT_OP_START_SERVICE_DISCOVERY,
6094 MGMT_STATUS_NOT_POWERED,
6095 &cp->type, sizeof(cp->type));
6096 goto failed;
6097 }
6098
6099 if (hdev->discovery.state != DISCOVERY_STOPPED ||
6100 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
6101 err = mgmt_cmd_complete(sk, hdev->id,
6102 MGMT_OP_START_SERVICE_DISCOVERY,
6103 MGMT_STATUS_BUSY, &cp->type,
6104 sizeof(cp->type));
6105 goto failed;
6106 }
6107
6108 if (hdev->discovery_paused) {
6109 err = mgmt_cmd_complete(sk, hdev->id,
6110 MGMT_OP_START_SERVICE_DISCOVERY,
6111 MGMT_STATUS_BUSY, &cp->type,
6112 sizeof(cp->type));
6113 goto failed;
6114 }
6115
6116 uuid_count = __le16_to_cpu(cp->uuid_count);
6117 if (uuid_count > max_uuid_count) {
6118 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
6119 uuid_count);
6120 err = mgmt_cmd_complete(sk, hdev->id,
6121 MGMT_OP_START_SERVICE_DISCOVERY,
6122 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6123 sizeof(cp->type));
6124 goto failed;
6125 }
6126
6127 expected_len = sizeof(*cp) + uuid_count * 16;
6128 if (expected_len != len) {
6129 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
6130 expected_len, len);
6131 err = mgmt_cmd_complete(sk, hdev->id,
6132 MGMT_OP_START_SERVICE_DISCOVERY,
6133 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6134 sizeof(cp->type));
6135 goto failed;
6136 }
6137
6138 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6139 err = mgmt_cmd_complete(sk, hdev->id,
6140 MGMT_OP_START_SERVICE_DISCOVERY,
6141 status, &cp->type, sizeof(cp->type));
6142 goto failed;
6143 }
6144
6145 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6146 hdev, data, len);
6147 if (!cmd) {
6148 err = -ENOMEM;
6149 goto failed;
6150 }
6151
6152 /* Clear the discovery filter first to free any previously
6153 * allocated memory for the UUID list.
6154 */
6155 hci_discovery_filter_clear(hdev);
6156
6157 hdev->discovery.result_filtering = true;
6158 hdev->discovery.type = cp->type;
6159 hdev->discovery.rssi = cp->rssi;
6160 hdev->discovery.uuid_count = uuid_count;
6161
6162 if (uuid_count > 0) {
6163 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6164 GFP_KERNEL);
6165 if (!hdev->discovery.uuids) {
6166 err = mgmt_cmd_complete(sk, hdev->id,
6167 MGMT_OP_START_SERVICE_DISCOVERY,
6168 MGMT_STATUS_FAILED,
6169 &cp->type, sizeof(cp->type));
6170 mgmt_pending_remove(cmd);
6171 goto failed;
6172 }
6173 }
6174
6175 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6176 start_discovery_complete);
6177 if (err < 0) {
6178 mgmt_pending_remove(cmd);
6179 goto failed;
6180 }
6181
6182 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6183
6184 failed:
6185 hci_dev_unlock(hdev);
6186 return err;
6187 }
6188
6189 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6190 {
6191 struct mgmt_pending_cmd *cmd = data;
6192
6193 if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
6194 return;
6195
6196 bt_dev_dbg(hdev, "err %d", err);
6197
6198 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
6199 cmd->param, 1);
6200 mgmt_pending_free(cmd);
6201
6202 if (!err)
6203 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6204 }
6205
6206 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6207 {
6208 if (!mgmt_pending_listed(hdev, data))
6209 return -ECANCELED;
6210
6211 return hci_stop_discovery_sync(hdev);
6212 }
6213
6214 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6215 u16 len)
6216 {
6217 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6218 struct mgmt_pending_cmd *cmd;
6219 int err;
6220
6221 bt_dev_dbg(hdev, "sock %p", sk);
6222
6223 hci_dev_lock(hdev);
6224
6225 if (!hci_discovery_active(hdev)) {
6226 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6227 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6228 sizeof(mgmt_cp->type));
6229 goto unlock;
6230 }
6231
6232 if (hdev->discovery.type != mgmt_cp->type) {
6233 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6234 MGMT_STATUS_INVALID_PARAMS,
6235 &mgmt_cp->type, sizeof(mgmt_cp->type));
6236 goto unlock;
6237 }
6238
6239 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6240 if (!cmd) {
6241 err = -ENOMEM;
6242 goto unlock;
6243 }
6244
6245 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6246 stop_discovery_complete);
6247 if (err < 0) {
6248 mgmt_pending_remove(cmd);
6249 goto unlock;
6250 }
6251
6252 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6253
6254 unlock:
6255 hci_dev_unlock(hdev);
6256 return err;
6257 }
6258
6259 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6260 u16 len)
6261 {
6262 struct mgmt_cp_confirm_name *cp = data;
6263 struct inquiry_entry *e;
6264 int err;
6265
6266 bt_dev_dbg(hdev, "sock %p", sk);
6267
6268 hci_dev_lock(hdev);
6269
6270 if (!hci_discovery_active(hdev)) {
6271 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6272 MGMT_STATUS_FAILED, &cp->addr,
6273 sizeof(cp->addr));
6274 goto failed;
6275 }
6276
6277 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6278 if (!e) {
6279 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6280 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6281 sizeof(cp->addr));
6282 goto failed;
6283 }
6284
6285 if (cp->name_known) {
6286 e->name_state = NAME_KNOWN;
6287 list_del(&e->list);
6288 } else {
6289 e->name_state = NAME_NEEDED;
6290 hci_inquiry_cache_update_resolve(hdev, e);
6291 }
6292
6293 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6294 &cp->addr, sizeof(cp->addr));
6295
6296 failed:
6297 hci_dev_unlock(hdev);
6298 return err;
6299 }
6300
6301 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6302 u16 len)
6303 {
6304 struct mgmt_cp_block_device *cp = data;
6305 u8 status;
6306 int err;
6307
6308 bt_dev_dbg(hdev, "sock %p", sk);
6309
6310 if (!bdaddr_type_is_valid(cp->addr.type))
6311 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6312 MGMT_STATUS_INVALID_PARAMS,
6313 &cp->addr, sizeof(cp->addr));
6314
6315 hci_dev_lock(hdev);
6316
6317 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6318 cp->addr.type);
6319 if (err < 0) {
6320 status = MGMT_STATUS_FAILED;
6321 goto done;
6322 }
6323
6324 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6325 sk);
6326 status = MGMT_STATUS_SUCCESS;
6327
6328 done:
6329 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6330 &cp->addr, sizeof(cp->addr));
6331
6332 hci_dev_unlock(hdev);
6333
6334 return err;
6335 }
6336
6337 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6338 u16 len)
6339 {
6340 struct mgmt_cp_unblock_device *cp = data;
6341 u8 status;
6342 int err;
6343
6344 bt_dev_dbg(hdev, "sock %p", sk);
6345
6346 if (!bdaddr_type_is_valid(cp->addr.type))
6347 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6348 MGMT_STATUS_INVALID_PARAMS,
6349 &cp->addr, sizeof(cp->addr));
6350
6351 hci_dev_lock(hdev);
6352
6353 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6354 cp->addr.type);
6355 if (err < 0) {
6356 status = MGMT_STATUS_INVALID_PARAMS;
6357 goto done;
6358 }
6359
6360 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6361 sk);
6362 status = MGMT_STATUS_SUCCESS;
6363
6364 done:
6365 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6366 &cp->addr, sizeof(cp->addr));
6367
6368 hci_dev_unlock(hdev);
6369
6370 return err;
6371 }
6372
6373 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6374 {
6375 return hci_update_eir_sync(hdev);
6376 }
6377
6378 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6379 u16 len)
6380 {
6381 struct mgmt_cp_set_device_id *cp = data;
6382 int err;
6383 __u16 source;
6384
6385 bt_dev_dbg(hdev, "sock %p", sk);
6386
6387 source = __le16_to_cpu(cp->source);
6388
6389 if (source > 0x0002)
6390 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6391 MGMT_STATUS_INVALID_PARAMS);
6392
6393 hci_dev_lock(hdev);
6394
6395 hdev->devid_source = source;
6396 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6397 hdev->devid_product = __le16_to_cpu(cp->product);
6398 hdev->devid_version = __le16_to_cpu(cp->version);
6399
6400 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6401 NULL, 0);
6402
6403 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6404
6405 hci_dev_unlock(hdev);
6406
6407 return err;
6408 }
6409
6410 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6411 {
6412 if (err)
6413 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6414 else
6415 bt_dev_dbg(hdev, "status %d", err);
6416 }
6417
6418 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6419 {
6420 struct mgmt_pending_cmd *cmd = data;
6421 struct cmd_lookup match = { NULL, hdev };
6422 u8 instance;
6423 struct adv_info *adv_instance;
6424 u8 status = mgmt_status(err);
6425
6426 if (err == -ECANCELED || !mgmt_pending_valid(hdev, data))
6427 return;
6428
6429 if (status) {
6430 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, status);
6431 mgmt_pending_free(cmd);
6432 return;
6433 }
6434
6435 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6436 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6437 else
6438 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6439
6440 settings_rsp(cmd, &match);
6441
6442 new_settings(hdev, match.sk);
6443
6444 if (match.sk)
6445 sock_put(match.sk);
6446
6447 /* If "Set Advertising" was just disabled and instance advertising was
6448 * set up earlier, then re-enable multi-instance advertising.
6449 */
6450 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6451 list_empty(&hdev->adv_instances))
6452 return;
6453
6454 instance = hdev->cur_adv_instance;
6455 if (!instance) {
6456 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6457 struct adv_info, list);
6458 if (!adv_instance)
6459 return;
6460
6461 instance = adv_instance->instance;
6462 }
6463
6464 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6465
6466 enable_advertising_instance(hdev, err);
6467 }
6468
6469 static int set_adv_sync(struct hci_dev *hdev, void *data)
6470 {
6471 struct mgmt_pending_cmd *cmd = data;
6472 struct mgmt_mode cp;
6473 u8 val;
6474
6475 mutex_lock(&hdev->mgmt_pending_lock);
6476
6477 if (!__mgmt_pending_listed(hdev, cmd)) {
6478 mutex_unlock(&hdev->mgmt_pending_lock);
6479 return -ECANCELED;
6480 }
6481
6482 memcpy(&cp, cmd->param, sizeof(cp));
6483
6484 mutex_unlock(&hdev->mgmt_pending_lock);
6485
6486 val = !!cp.val;
6487
6488 if (cp.val == 0x02)
6489 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6490 else
6491 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6492
6493 cancel_adv_timeout(hdev);
6494
6495 if (val) {
6496 /* Switch to instance "0" for the Set Advertising setting.
6497 * We cannot use update_[adv|scan_rsp]_data() here as the
6498 * HCI_ADVERTISING flag is not yet set.
6499 */
6500 hdev->cur_adv_instance = 0x00;
6501
6502 if (ext_adv_capable(hdev)) {
6503 hci_start_ext_adv_sync(hdev, 0x00);
6504 } else {
6505 hci_update_adv_data_sync(hdev, 0x00);
6506 hci_update_scan_rsp_data_sync(hdev, 0x00);
6507 hci_enable_advertising_sync(hdev);
6508 }
6509 } else {
6510 hci_disable_advertising_sync(hdev);
6511 }
6512
6513 return 0;
6514 }
6515
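/* Handler for MGMT_OP_SET_ADVERTISING. A value of 0x02 requests
 * connectable advertising. When no HCI traffic is needed (powered off,
 * no effective change, mesh enabled, LE connections present or an
 * active LE scan running) only the flags are toggled and a settings
 * response is sent; otherwise the change is queued via set_adv_sync().
 */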
6516 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6517 u16 len)
6518 {
6519 struct mgmt_mode *cp = data;
6520 struct mgmt_pending_cmd *cmd;
6521 u8 val, status;
6522 int err;
6523
6524 bt_dev_dbg(hdev, "sock %p", sk);
6525
6526 status = mgmt_le_support(hdev);
6527 if (status)
6528 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6529 status);
6530
6531 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6532 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6533 MGMT_STATUS_INVALID_PARAMS);
6534
6535 if (hdev->advertising_paused)
6536 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6537 MGMT_STATUS_BUSY);
6538
6539 hci_dev_lock(hdev);
6540
6541 val = !!cp->val;
6542
6543 /* The following conditions are ones which mean that we should
6544 * not do any HCI communication but directly send a mgmt
6545 * response to user space (after toggling the flag if
6546 * necessary).
6547 */
6548 if (!hdev_is_powered(hdev) ||
6549 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6550 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6551 hci_dev_test_flag(hdev, HCI_MESH) ||
6552 hci_conn_num(hdev, LE_LINK) > 0 ||
6553 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6554 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6555 bool changed;
6556
6557 if (cp->val) {
6558 hdev->cur_adv_instance = 0x00;
6559 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6560 if (cp->val == 0x02)
6561 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6562 else
6563 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6564 } else {
6565 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6566 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6567 }
6568
6569 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6570 if (err < 0)
6571 goto unlock;
6572
6573 if (changed)
6574 err = new_settings(hdev, sk);
6575
6576 goto unlock;
6577 }
6578
6579 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6580 pending_find(MGMT_OP_SET_LE, hdev)) {
6581 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6582 MGMT_STATUS_BUSY);
6583 goto unlock;
6584 }
6585
6586 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6587 if (!cmd)
6588 err = -ENOMEM;
6589 else
6590 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6591 set_advertising_complete);
6592
6593 if (err < 0 && cmd)
6594 mgmt_pending_remove(cmd);
6595
6596 unlock:
6597 hci_dev_unlock(hdev);
6598 return err;
6599 }
6600
6601 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6602 void *data, u16 len)
6603 {
6604 struct mgmt_cp_set_static_address *cp = data;
6605 int err;
6606
6607 bt_dev_dbg(hdev, "sock %p", sk);
6608
6609 if (!lmp_le_capable(hdev))
6610 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6611 MGMT_STATUS_NOT_SUPPORTED);
6612
6613 if (hdev_is_powered(hdev))
6614 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6615 MGMT_STATUS_REJECTED);
6616
6617 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6618 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6619 return mgmt_cmd_status(sk, hdev->id,
6620 MGMT_OP_SET_STATIC_ADDRESS,
6621 MGMT_STATUS_INVALID_PARAMS);
6622
6623 /* Two most significant bits shall be set */
6624 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6625 return mgmt_cmd_status(sk, hdev->id,
6626 MGMT_OP_SET_STATIC_ADDRESS,
6627 MGMT_STATUS_INVALID_PARAMS);
6628 }
6629
6630 hci_dev_lock(hdev);
6631
6632 bacpy(&hdev->static_addr, &cp->bdaddr);
6633
6634 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6635 if (err < 0)
6636 goto unlock;
6637
6638 err = new_settings(hdev, sk);
6639
6640 unlock:
6641 hci_dev_unlock(hdev);
6642 return err;
6643 }
6644
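/* Handler for MGMT_OP_SET_SCAN_PARAMS. Interval and window must each be
 * in the 0x0004-0x4000 range (2.5 ms to 10.24 s in 0.625 ms units) and
 * the window may not exceed the interval.
 */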
6645 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6646 void *data, u16 len)
6647 {
6648 struct mgmt_cp_set_scan_params *cp = data;
6649 __u16 interval, window;
6650 int err;
6651
6652 bt_dev_dbg(hdev, "sock %p", sk);
6653
6654 if (!lmp_le_capable(hdev))
6655 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6656 MGMT_STATUS_NOT_SUPPORTED);
6657
6658 /* Keep allowed ranges in sync with set_mesh() */
6659 interval = __le16_to_cpu(cp->interval);
6660
6661 if (interval < 0x0004 || interval > 0x4000)
6662 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6663 MGMT_STATUS_INVALID_PARAMS);
6664
6665 window = __le16_to_cpu(cp->window);
6666
6667 if (window < 0x0004 || window > 0x4000)
6668 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6669 MGMT_STATUS_INVALID_PARAMS);
6670
6671 if (window > interval)
6672 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6673 MGMT_STATUS_INVALID_PARAMS);
6674
6675 hci_dev_lock(hdev);
6676
6677 hdev->le_scan_interval = interval;
6678 hdev->le_scan_window = window;
6679
6680 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6681 NULL, 0);
6682
6683 /* If background scan is running, restart it so new parameters are
6684 * loaded.
6685 */
6686 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6687 hdev->discovery.state == DISCOVERY_STOPPED)
6688 hci_update_passive_scan(hdev);
6689
6690 hci_dev_unlock(hdev);
6691
6692 return err;
6693 }
6694
6695 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6696 {
6697 struct mgmt_pending_cmd *cmd = data;
6698
6699 bt_dev_dbg(hdev, "err %d", err);
6700
6701 if (err) {
6702 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6703 mgmt_status(err));
6704 } else {
6705 struct mgmt_mode *cp = cmd->param;
6706
6707 if (cp->val)
6708 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6709 else
6710 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6711
6712 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6713 new_settings(hdev, cmd->sk);
6714 }
6715
6716 mgmt_pending_free(cmd);
6717 }
6718
6719 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6720 {
6721 struct mgmt_pending_cmd *cmd = data;
6722 struct mgmt_mode *cp = cmd->param;
6723
6724 return hci_write_fast_connectable_sync(hdev, cp->val);
6725 }
6726
6727 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6728 void *data, u16 len)
6729 {
6730 struct mgmt_mode *cp = data;
6731 struct mgmt_pending_cmd *cmd;
6732 int err;
6733
6734 bt_dev_dbg(hdev, "sock %p", sk);
6735
6736 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6737 hdev->hci_ver < BLUETOOTH_VER_1_2)
6738 return mgmt_cmd_status(sk, hdev->id,
6739 MGMT_OP_SET_FAST_CONNECTABLE,
6740 MGMT_STATUS_NOT_SUPPORTED);
6741
6742 if (cp->val != 0x00 && cp->val != 0x01)
6743 return mgmt_cmd_status(sk, hdev->id,
6744 MGMT_OP_SET_FAST_CONNECTABLE,
6745 MGMT_STATUS_INVALID_PARAMS);
6746
6747 hci_dev_lock(hdev);
6748
6749 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6750 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6751 goto unlock;
6752 }
6753
6754 if (!hdev_is_powered(hdev)) {
6755 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6756 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6757 new_settings(hdev, sk);
6758 goto unlock;
6759 }
6760
6761 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6762 len);
6763 if (!cmd)
6764 err = -ENOMEM;
6765 else
6766 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6767 fast_connectable_complete);
6768
6769 if (err < 0) {
6770 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6771 MGMT_STATUS_FAILED);
6772
6773 if (cmd)
6774 mgmt_pending_free(cmd);
6775 }
6776
6777 unlock:
6778 hci_dev_unlock(hdev);
6779
6780 return err;
6781 }
6782
6783 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6784 {
6785 struct mgmt_pending_cmd *cmd = data;
6786
6787 bt_dev_dbg(hdev, "err %d", err);
6788
6789 if (err) {
6790 u8 mgmt_err = mgmt_status(err);
6791
6792 /* We need to restore the flag if related HCI commands
6793 * failed.
6794 */
6795 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6796
6797 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
6798 } else {
6799 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6800 new_settings(hdev, cmd->sk);
6801 }
6802
6803 mgmt_pending_free(cmd);
6804 }
6805
6806 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6807 {
6808 int status;
6809
6810 status = hci_write_fast_connectable_sync(hdev, false);
6811
6812 if (!status)
6813 status = hci_update_scan_sync(hdev);
6814
6815 /* Since only the advertising data flags will change, there
6816 * is no need to update the scan response data.
6817 */
6818 if (!status)
6819 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6820
6821 return status;
6822 }
6823
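/* Handler for MGMT_OP_SET_BREDR. Disabling BR/EDR is only allowed while
 * powered off; re-enabling is rejected for LE-only configurations that
 * use a static address or have Secure Connections enabled, since the
 * resulting dual-mode setup would be invalid.
 */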
6824 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6825 {
6826 struct mgmt_mode *cp = data;
6827 struct mgmt_pending_cmd *cmd;
6828 int err;
6829
6830 bt_dev_dbg(hdev, "sock %p", sk);
6831
6832 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6833 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6834 MGMT_STATUS_NOT_SUPPORTED);
6835
6836 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6837 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6838 MGMT_STATUS_REJECTED);
6839
6840 if (cp->val != 0x00 && cp->val != 0x01)
6841 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6842 MGMT_STATUS_INVALID_PARAMS);
6843
6844 hci_dev_lock(hdev);
6845
6846 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6847 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6848 goto unlock;
6849 }
6850
6851 if (!hdev_is_powered(hdev)) {
6852 if (!cp->val) {
6853 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6854 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6855 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6856 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6857 }
6858
6859 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6860
6861 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6862 if (err < 0)
6863 goto unlock;
6864
6865 err = new_settings(hdev, sk);
6866 goto unlock;
6867 }
6868
6869 /* Reject disabling when powered on */
6870 if (!cp->val) {
6871 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6872 MGMT_STATUS_REJECTED);
6873 goto unlock;
6874 } else {
6875 /* When configuring a dual-mode controller to operate
6876 * with LE only and using a static address, then switching
6877 * BR/EDR back on is not allowed.
6878 *
6879 * Dual-mode controllers shall operate with the public
6880 * address as its identity address for BR/EDR and LE. So
6881 * reject the attempt to create an invalid configuration.
6882 *
6883 * The same restriction applies when Secure Connections
6884 * has been enabled. For BR/EDR this is a controller feature
6885 * while for LE it is a host stack feature. This means that
6886 * switching BR/EDR back on when Secure Connections has been
6887 * enabled is not a supported transaction.
6888 */
6889 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6890 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6891 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6892 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6893 MGMT_STATUS_REJECTED);
6894 goto unlock;
6895 }
6896 }
6897
6898 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6899 if (!cmd)
6900 err = -ENOMEM;
6901 else
6902 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6903 set_bredr_complete);
6904
6905 if (err < 0) {
6906 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6907 MGMT_STATUS_FAILED);
6908 if (cmd)
6909 mgmt_pending_free(cmd);
6910
6911 goto unlock;
6912 }
6913
6914 /* We need to flip the bit already here so that
6915 * hci_req_update_adv_data generates the correct flags.
6916 */
6917 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6918
6919 unlock:
6920 hci_dev_unlock(hdev);
6921 return err;
6922 }
6923
6924 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6925 {
6926 struct mgmt_pending_cmd *cmd = data;
6927 struct mgmt_mode *cp;
6928
6929 bt_dev_dbg(hdev, "err %d", err);
6930
6931 if (err) {
6932 u8 mgmt_err = mgmt_status(err);
6933
6934 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
6935 goto done;
6936 }
6937
6938 cp = cmd->param;
6939
6940 switch (cp->val) {
6941 case 0x00:
6942 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6943 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6944 break;
6945 case 0x01:
6946 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6947 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6948 break;
6949 case 0x02:
6950 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6951 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6952 break;
6953 }
6954
6955 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6956 new_settings(hdev, cmd->sk);
6957
6958 done:
6959 mgmt_pending_free(cmd);
6960 }
6961
6962 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6963 {
6964 struct mgmt_pending_cmd *cmd = data;
6965 struct mgmt_mode *cp = cmd->param;
6966 u8 val = !!cp->val;
6967
6968 /* Force write of val */
6969 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6970
6971 return hci_write_sc_support_sync(hdev, val);
6972 }
6973
6974 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6975 void *data, u16 len)
6976 {
6977 struct mgmt_mode *cp = data;
6978 struct mgmt_pending_cmd *cmd;
6979 u8 val;
6980 int err;
6981
6982 bt_dev_dbg(hdev, "sock %p", sk);
6983
6984 if (!lmp_sc_capable(hdev) &&
6985 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6986 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6987 MGMT_STATUS_NOT_SUPPORTED);
6988
6989 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6990 lmp_sc_capable(hdev) &&
6991 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6992 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6993 MGMT_STATUS_REJECTED);
6994
6995 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6996 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6997 MGMT_STATUS_INVALID_PARAMS);
6998
6999 hci_dev_lock(hdev);
7000
7001 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
7002 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
7003 bool changed;
7004
7005 if (cp->val) {
7006 changed = !hci_dev_test_and_set_flag(hdev,
7007 HCI_SC_ENABLED);
7008 if (cp->val == 0x02)
7009 hci_dev_set_flag(hdev, HCI_SC_ONLY);
7010 else
7011 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
7012 } else {
7013 changed = hci_dev_test_and_clear_flag(hdev,
7014 HCI_SC_ENABLED);
7015 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
7016 }
7017
7018 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
7019 if (err < 0)
7020 goto failed;
7021
7022 if (changed)
7023 err = new_settings(hdev, sk);
7024
7025 goto failed;
7026 }
7027
7028 val = !!cp->val;
7029
7030 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
7031 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
7032 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
7033 goto failed;
7034 }
7035
7036 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
7037 if (!cmd)
7038 err = -ENOMEM;
7039 else
7040 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
7041 set_secure_conn_complete);
7042
7043 if (err < 0) {
7044 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
7045 MGMT_STATUS_FAILED);
7046 if (cmd)
7047 mgmt_pending_free(cmd);
7048 }
7049
7050 failed:
7051 hci_dev_unlock(hdev);
7052 return err;
7053 }
7054
7055 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
7056 void *data, u16 len)
7057 {
7058 struct mgmt_mode *cp = data;
7059 bool changed, use_changed;
7060 int err;
7061
7062 bt_dev_dbg(hdev, "sock %p", sk);
7063
7064 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
7065 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
7066 MGMT_STATUS_INVALID_PARAMS);
7067
7068 hci_dev_lock(hdev);
7069
7070 if (cp->val)
7071 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
7072 else
7073 changed = hci_dev_test_and_clear_flag(hdev,
7074 HCI_KEEP_DEBUG_KEYS);
7075
7076 if (cp->val == 0x02)
7077 use_changed = !hci_dev_test_and_set_flag(hdev,
7078 HCI_USE_DEBUG_KEYS);
7079 else
7080 use_changed = hci_dev_test_and_clear_flag(hdev,
7081 HCI_USE_DEBUG_KEYS);
7082
7083 if (hdev_is_powered(hdev) && use_changed &&
7084 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7085 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
7086 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
7087 sizeof(mode), &mode);
7088 }
7089
7090 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
7091 if (err < 0)
7092 goto unlock;
7093
7094 if (changed)
7095 err = new_settings(hdev, sk);
7096
7097 unlock:
7098 hci_dev_unlock(hdev);
7099 return err;
7100 }
7101
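/* Handler for MGMT_OP_SET_PRIVACY. Only accepted while powered off; it
 * stores the provided IRK, toggles the privacy flags and marks the RPA
 * as expired so a fresh resolvable private address gets generated. A
 * value of 0x02 selects limited privacy mode.
 */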
7102 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7103 u16 len)
7104 {
7105 struct mgmt_cp_set_privacy *cp = cp_data;
7106 bool changed;
7107 int err;
7108
7109 bt_dev_dbg(hdev, "sock %p", sk);
7110
7111 if (!lmp_le_capable(hdev))
7112 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7113 MGMT_STATUS_NOT_SUPPORTED);
7114
7115 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
7116 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7117 MGMT_STATUS_INVALID_PARAMS);
7118
7119 if (hdev_is_powered(hdev))
7120 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7121 MGMT_STATUS_REJECTED);
7122
7123 hci_dev_lock(hdev);
7124
7125 /* If user space supports this command it is also expected to
7126 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
7127 */
7128 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7129
7130 if (cp->privacy) {
7131 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
7132 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
7133 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
7134 hci_adv_instances_set_rpa_expired(hdev, true);
7135 if (cp->privacy == 0x02)
7136 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
7137 else
7138 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7139 } else {
7140 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7141 memset(hdev->irk, 0, sizeof(hdev->irk));
7142 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7143 hci_adv_instances_set_rpa_expired(hdev, false);
7144 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7145 }
7146
7147 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7148 if (err < 0)
7149 goto unlock;
7150
7151 if (changed)
7152 err = new_settings(hdev, sk);
7153
7154 unlock:
7155 hci_dev_unlock(hdev);
7156 return err;
7157 }
7158
7159 static bool irk_is_valid(struct mgmt_irk_info *irk)
7160 {
7161 switch (irk->addr.type) {
7162 case BDADDR_LE_PUBLIC:
7163 return true;
7164
7165 case BDADDR_LE_RANDOM:
7166 /* Two most significant bits shall be set */
7167 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7168 return false;
7169 return true;
7170 }
7171
7172 return false;
7173 }
7174
7175 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7176 u16 len)
7177 {
7178 struct mgmt_cp_load_irks *cp = cp_data;
7179 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7180 sizeof(struct mgmt_irk_info));
7181 u16 irk_count, expected_len;
7182 int i, err;
7183
7184 bt_dev_dbg(hdev, "sock %p", sk);
7185
7186 if (!lmp_le_capable(hdev))
7187 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7188 MGMT_STATUS_NOT_SUPPORTED);
7189
7190 irk_count = __le16_to_cpu(cp->irk_count);
7191 if (irk_count > max_irk_count) {
7192 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7193 irk_count);
7194 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7195 MGMT_STATUS_INVALID_PARAMS);
7196 }
7197
7198 expected_len = struct_size(cp, irks, irk_count);
7199 if (expected_len != len) {
7200 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7201 expected_len, len);
7202 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7203 MGMT_STATUS_INVALID_PARAMS);
7204 }
7205
7206 bt_dev_dbg(hdev, "irk_count %u", irk_count);
7207
7208 for (i = 0; i < irk_count; i++) {
7209 struct mgmt_irk_info *key = &cp->irks[i];
7210
7211 if (!irk_is_valid(key))
7212 return mgmt_cmd_status(sk, hdev->id,
7213 MGMT_OP_LOAD_IRKS,
7214 MGMT_STATUS_INVALID_PARAMS);
7215 }
7216
7217 hci_dev_lock(hdev);
7218
7219 hci_smp_irks_clear(hdev);
7220
7221 for (i = 0; i < irk_count; i++) {
7222 struct mgmt_irk_info *irk = &cp->irks[i];
7223
7224 if (hci_is_blocked_key(hdev,
7225 HCI_BLOCKED_KEY_TYPE_IRK,
7226 irk->val)) {
7227 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7228 &irk->addr.bdaddr);
7229 continue;
7230 }
7231
7232 hci_add_irk(hdev, &irk->addr.bdaddr,
7233 le_addr_type(irk->addr.type), irk->val,
7234 BDADDR_ANY);
7235 }
7236
7237 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7238
7239 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7240
7241 hci_dev_unlock(hdev);
7242
7243 return err;
7244 }
7245
7246 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7247 {
7248 if (key->initiator != 0x00 && key->initiator != 0x01)
7249 return false;
7250
7251 switch (key->addr.type) {
7252 case BDADDR_LE_PUBLIC:
7253 return true;
7254
7255 case BDADDR_LE_RANDOM:
7256 /* Two most significant bits shall be set */
7257 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7258 return false;
7259 return true;
7260 }
7261
7262 return false;
7263 }
7264
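/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS. Replaces the existing LTK
 * store with the supplied list after length validation; blocked keys,
 * invalid entries and debug keys are skipped rather than failing the
 * whole command.
 */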
7265 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7266 void *cp_data, u16 len)
7267 {
7268 struct mgmt_cp_load_long_term_keys *cp = cp_data;
7269 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7270 sizeof(struct mgmt_ltk_info));
7271 u16 key_count, expected_len;
7272 int i, err;
7273
7274 bt_dev_dbg(hdev, "sock %p", sk);
7275
7276 if (!lmp_le_capable(hdev))
7277 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7278 MGMT_STATUS_NOT_SUPPORTED);
7279
7280 key_count = __le16_to_cpu(cp->key_count);
7281 if (key_count > max_key_count) {
7282 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7283 key_count);
7284 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7285 MGMT_STATUS_INVALID_PARAMS);
7286 }
7287
7288 expected_len = struct_size(cp, keys, key_count);
7289 if (expected_len != len) {
7290 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7291 expected_len, len);
7292 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7293 MGMT_STATUS_INVALID_PARAMS);
7294 }
7295
7296 bt_dev_dbg(hdev, "key_count %u", key_count);
7297
7298 hci_dev_lock(hdev);
7299
7300 hci_smp_ltks_clear(hdev);
7301
7302 for (i = 0; i < key_count; i++) {
7303 struct mgmt_ltk_info *key = &cp->keys[i];
7304 u8 type, authenticated;
7305
7306 if (hci_is_blocked_key(hdev,
7307 HCI_BLOCKED_KEY_TYPE_LTK,
7308 key->val)) {
7309 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7310 &key->addr.bdaddr);
7311 continue;
7312 }
7313
7314 if (!ltk_is_valid(key)) {
7315 bt_dev_warn(hdev, "Invalid LTK for %pMR",
7316 &key->addr.bdaddr);
7317 continue;
7318 }
7319
7320 switch (key->type) {
7321 case MGMT_LTK_UNAUTHENTICATED:
7322 authenticated = 0x00;
7323 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7324 break;
7325 case MGMT_LTK_AUTHENTICATED:
7326 authenticated = 0x01;
7327 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7328 break;
7329 case MGMT_LTK_P256_UNAUTH:
7330 authenticated = 0x00;
7331 type = SMP_LTK_P256;
7332 break;
7333 case MGMT_LTK_P256_AUTH:
7334 authenticated = 0x01;
7335 type = SMP_LTK_P256;
7336 break;
7337 case MGMT_LTK_P256_DEBUG:
7338 authenticated = 0x00;
7339 type = SMP_LTK_P256_DEBUG;
7340 fallthrough;
7341 default:
7342 continue;
7343 }
7344
7345 hci_add_ltk(hdev, &key->addr.bdaddr,
7346 le_addr_type(key->addr.type), type, authenticated,
7347 key->val, key->enc_size, key->ediv, key->rand);
7348 }
7349
7350 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7351 NULL, 0);
7352
7353 hci_dev_unlock(hdev);
7354
7355 return err;
7356 }
7357
7358 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7359 {
7360 struct mgmt_pending_cmd *cmd = data;
7361 struct hci_conn *conn = cmd->user_data;
7362 struct mgmt_cp_get_conn_info *cp = cmd->param;
7363 struct mgmt_rp_get_conn_info rp;
7364 u8 status;
7365
7366 bt_dev_dbg(hdev, "err %d", err);
7367
7368 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7369
7370 status = mgmt_status(err);
7371 if (status == MGMT_STATUS_SUCCESS) {
7372 rp.rssi = conn->rssi;
7373 rp.tx_power = conn->tx_power;
7374 rp.max_tx_power = conn->max_tx_power;
7375 } else {
7376 rp.rssi = HCI_RSSI_INVALID;
7377 rp.tx_power = HCI_TX_POWER_INVALID;
7378 rp.max_tx_power = HCI_TX_POWER_INVALID;
7379 }
7380
7381 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_GET_CONN_INFO, status,
7382 &rp, sizeof(rp));
7383
7384 mgmt_pending_free(cmd);
7385 }
7386
7387 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7388 {
7389 struct mgmt_pending_cmd *cmd = data;
7390 struct mgmt_cp_get_conn_info *cp = cmd->param;
7391 struct hci_conn *conn;
7392 int err;
7393 __le16 handle;
7394
7395 /* Make sure we are still connected */
7396 if (cp->addr.type == BDADDR_BREDR)
7397 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7398 &cp->addr.bdaddr);
7399 else
7400 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7401
7402 if (!conn || conn->state != BT_CONNECTED)
7403 return MGMT_STATUS_NOT_CONNECTED;
7404
7405 cmd->user_data = conn;
7406 handle = cpu_to_le16(conn->handle);
7407
7408 /* Refresh RSSI each time */
7409 err = hci_read_rssi_sync(hdev, handle);
7410
7411 /* For LE links the TX power does not change, thus we don't need
7412 * to query for it once the value is known.
7413 */
7414 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7415 conn->tx_power == HCI_TX_POWER_INVALID))
7416 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7417
7418 /* Max TX power needs to be read only once per connection */
7419 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7420 err = hci_read_tx_power_sync(hdev, handle, 0x01);
7421
7422 return err;
7423 }
7424
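/* Handler for MGMT_OP_GET_CONN_INFO. Replies from the cached RSSI/TX
 * power values when they are recent enough; otherwise it queues
 * get_conn_info_sync() to refresh them from the controller first.
 */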
7425 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7426 u16 len)
7427 {
7428 struct mgmt_cp_get_conn_info *cp = data;
7429 struct mgmt_rp_get_conn_info rp;
7430 struct hci_conn *conn;
7431 unsigned long conn_info_age;
7432 int err = 0;
7433
7434 bt_dev_dbg(hdev, "sock %p", sk);
7435
7436 memset(&rp, 0, sizeof(rp));
7437 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7438 rp.addr.type = cp->addr.type;
7439
7440 if (!bdaddr_type_is_valid(cp->addr.type))
7441 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7442 MGMT_STATUS_INVALID_PARAMS,
7443 &rp, sizeof(rp));
7444
7445 hci_dev_lock(hdev);
7446
7447 if (!hdev_is_powered(hdev)) {
7448 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7449 MGMT_STATUS_NOT_POWERED, &rp,
7450 sizeof(rp));
7451 goto unlock;
7452 }
7453
7454 if (cp->addr.type == BDADDR_BREDR)
7455 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7456 &cp->addr.bdaddr);
7457 else
7458 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7459
7460 if (!conn || conn->state != BT_CONNECTED) {
7461 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7462 MGMT_STATUS_NOT_CONNECTED, &rp,
7463 sizeof(rp));
7464 goto unlock;
7465 }
7466
7467 /* To keep the client from guessing when to poll again, the conn info
7468 * age is calculated as a random value between the min/max set in hdev.
7469 */
7470 conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
7471 hdev->conn_info_max_age - 1);
7472
7473 /* Query controller to refresh cached values if they are too old or were
7474 * never read.
7475 */
7476 if (time_after(jiffies, conn->conn_info_timestamp +
7477 msecs_to_jiffies(conn_info_age)) ||
7478 !conn->conn_info_timestamp) {
7479 struct mgmt_pending_cmd *cmd;
7480
7481 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7482 len);
7483 if (!cmd) {
7484 err = -ENOMEM;
7485 } else {
7486 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7487 cmd, get_conn_info_complete);
7488 }
7489
7490 if (err < 0) {
7491 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7492 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7493
7494 if (cmd)
7495 mgmt_pending_free(cmd);
7496
7497 goto unlock;
7498 }
7499
7500 conn->conn_info_timestamp = jiffies;
7501 } else {
7502 /* Cache is valid, just reply with values cached in hci_conn */
7503 rp.rssi = conn->rssi;
7504 rp.tx_power = conn->tx_power;
7505 rp.max_tx_power = conn->max_tx_power;
7506
7507 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7508 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7509 }
7510
7511 unlock:
7512 hci_dev_unlock(hdev);
7513 return err;
7514 }
7515
7516 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7517 {
7518 struct mgmt_pending_cmd *cmd = data;
7519 struct mgmt_cp_get_clock_info *cp = cmd->param;
7520 struct mgmt_rp_get_clock_info rp;
7521 struct hci_conn *conn = cmd->user_data;
7522 u8 status = mgmt_status(err);
7523
7524 bt_dev_dbg(hdev, "err %d", err);
7525
7526 memset(&rp, 0, sizeof(rp));
7527 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7528 rp.addr.type = cp->addr.type;
7529
7530 if (err)
7531 goto complete;
7532
7533 rp.local_clock = cpu_to_le32(hdev->clock);
7534
7535 if (conn) {
7536 rp.piconet_clock = cpu_to_le32(conn->clock);
7537 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7538 }
7539
7540 complete:
7541 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status, &rp,
7542 sizeof(rp));
7543
7544 mgmt_pending_free(cmd);
7545 }
7546
7547 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7548 {
7549 struct mgmt_pending_cmd *cmd = data;
7550 struct mgmt_cp_get_clock_info *cp = cmd->param;
7551 struct hci_cp_read_clock hci_cp;
7552 struct hci_conn *conn;
7553
7554 memset(&hci_cp, 0, sizeof(hci_cp));
7555 hci_read_clock_sync(hdev, &hci_cp);
7556
7557 /* Make sure connection still exists */
7558 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7559 if (!conn || conn->state != BT_CONNECTED)
7560 return MGMT_STATUS_NOT_CONNECTED;
7561
7562 cmd->user_data = conn;
7563 hci_cp.handle = cpu_to_le16(conn->handle);
7564 hci_cp.which = 0x01; /* Piconet clock */
7565
7566 return hci_read_clock_sync(hdev, &hci_cp);
7567 }
7568
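/* Handler for MGMT_OP_GET_CLOCK_INFO. Reads the local clock and, when a
 * peer address is given, the piconet clock of the matching BR/EDR
 * connection.
 */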
7569 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7570 u16 len)
7571 {
7572 struct mgmt_cp_get_clock_info *cp = data;
7573 struct mgmt_rp_get_clock_info rp;
7574 struct mgmt_pending_cmd *cmd;
7575 struct hci_conn *conn;
7576 int err;
7577
7578 bt_dev_dbg(hdev, "sock %p", sk);
7579
7580 memset(&rp, 0, sizeof(rp));
7581 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7582 rp.addr.type = cp->addr.type;
7583
7584 if (cp->addr.type != BDADDR_BREDR)
7585 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7586 MGMT_STATUS_INVALID_PARAMS,
7587 &rp, sizeof(rp));
7588
7589 hci_dev_lock(hdev);
7590
7591 if (!hdev_is_powered(hdev)) {
7592 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7593 MGMT_STATUS_NOT_POWERED, &rp,
7594 sizeof(rp));
7595 goto unlock;
7596 }
7597
7598 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7599 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7600 &cp->addr.bdaddr);
7601 if (!conn || conn->state != BT_CONNECTED) {
7602 err = mgmt_cmd_complete(sk, hdev->id,
7603 MGMT_OP_GET_CLOCK_INFO,
7604 MGMT_STATUS_NOT_CONNECTED,
7605 &rp, sizeof(rp));
7606 goto unlock;
7607 }
7608 } else {
7609 conn = NULL;
7610 }
7611
7612 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7613 if (!cmd)
7614 err = -ENOMEM;
7615 else
7616 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7617 get_clock_info_complete);
7618
7619 if (err < 0) {
7620 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7621 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7622
7623 if (cmd)
7624 mgmt_pending_free(cmd);
7625 }
7626
7627
7628 unlock:
7629 hci_dev_unlock(hdev);
7630 return err;
7631 }

static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;
	ev.action = action;

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}

static void add_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_device *cp = cmd->param;

	if (!err) {
		struct hci_conn_params *params;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));

		device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
			     cp->action);
		device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
				     cp->addr.type, hdev->conn_flags,
				     params ? params->flags : 0);
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
			  mgmt_status(err), &cp->addr, sizeof(cp->addr));
	mgmt_pending_free(cmd);
}

static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}

static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only the incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	params = hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				     auto_conn);
	if (!params) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
				 add_device_complete);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		mgmt_pending_free(cmd);
	}

	goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
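
/* Hedged userspace sketch (not built with the kernel): how a management
 * client might issue Add Device over the HCI control channel. Assumes the
 * BlueZ userspace headers provide struct mgmt_hdr, struct mgmt_cp_add_device
 * and MGMT_OP_ADD_DEVICE; error handling is trimmed for brevity.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>
#include <bluetooth/mgmt.h>

static int mgmt_add_device(uint16_t index, const bdaddr_t *bdaddr,
			   uint8_t addr_type, uint8_t action)
{
	struct {
		struct mgmt_hdr hdr;
		struct mgmt_cp_add_device cp;
	} __attribute__((packed)) req;
	struct sockaddr_hci addr;
	int sk;

	sk = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
	if (sk < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.hci_family = AF_BLUETOOTH;
	addr.hci_dev = HCI_DEV_NONE;
	addr.hci_channel = HCI_CHANNEL_CONTROL;
	if (bind(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(sk);
		return -1;
	}

	memset(&req, 0, sizeof(req));
	req.hdr.opcode = htobs(MGMT_OP_ADD_DEVICE);
	req.hdr.index = htobs(index);
	req.hdr.len = htobs(sizeof(req.cp));
	bacpy(&req.cp.addr.bdaddr, bdaddr);
	req.cp.addr.type = addr_type;	/* e.g. BDADDR_LE_PUBLIC */
	req.cp.action = action;		/* 0x00/0x01/0x02 as validated above */

	if (write(sk, &req, sizeof(req)) < 0) {
		close(sk);
		return -1;
	}

	return sk;	/* caller waits for the Command Complete event */
}
#endif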

static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}

static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_free(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			hci_conn_params_free(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int conn_update_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn_params *params = data;
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, &params->addr, params->addr_type);
	if (!conn)
		return -ECANCELED;

	return hci_le_conn_update_sync(hdev, conn, params);
}

static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	if (param_count > 1)
		hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		bool update = false;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		/* Detect when the load is for an already existing parameter
		 * entry and, if so, attempt to trigger the connection update
		 * procedure.
		 */
		if (!i && param_count == 1) {
			hci_param = hci_conn_params_lookup(hdev,
							   &param->addr.bdaddr,
							   addr_type);
			if (hci_param)
				update = true;
			else
				hci_conn_params_clear_disabled(hdev);
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;

		/* Check if we need to trigger a connection update */
		if (update) {
			struct hci_conn *conn;

			/* Look up an existing connection as central and, if
			 * the parameters don't match, trigger a connection
			 * update.
			 */
			conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
						       addr_type);
			if (conn && conn->role == HCI_ROLE_MASTER &&
			    (conn->le_conn_min_interval != min ||
			     conn->le_conn_max_interval != max ||
			     conn->le_conn_latency != latency ||
			     conn->le_supv_timeout != timeout))
				hci_cmd_sync_queue(hdev, conn_update_sync,
						   hci_param, NULL);
		}
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
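
/* Illustrative sketch (not built): approximately the range checks that
 * hci_check_conn_params() applies above, per the LE Connection Update
 * constraints. Intervals are in 1.25 ms units and the timeout in 10 ms
 * units; the real helper may differ in detail.
 */
#if 0
static int check_le_conn_params(u16 min, u16 max, u16 latency, u16 to)
{
	u16 max_latency;

	if (min > max || min < 0x0006 || max > 0x0c80)
		return -EINVAL;

	if (to < 0x000a || to > 0x0c80)
		return -EINVAL;

	/* The supervision timeout must cover at least one connection
	 * event at the maximum interval: to * 10 ms > (1 + latency) *
	 * max * 1.25 ms * 2, i.e. latency must stay below to * 4 / max - 1.
	 */
	max_latency = (to * 4 / max) - 1;
	if (latency > 0x01f3 || latency > max_latency)
		return -EINVAL;

	return 0;
}
#endif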

static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_free(cmd);
}
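
/* Illustrative sketch (not built): the EIR/AD structure layout that
 * eir_append_data() emits above. Each field is <length><type><payload>,
 * where the length byte counts the type byte plus the payload, which is
 * why a 3-byte Class of Device field costs 5 bytes and a 16-byte hash 18.
 */
#if 0
static u16 eir_put(u8 *eir, u16 off, u8 type, const u8 *data, u8 data_len)
{
	eir[off++] = data_len + 1;	/* type byte + payload */
	eir[off++] = type;
	memcpy(&eir[off], data, data_len);

	return off + data_len;
}
#endif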

static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
				  struct mgmt_cp_read_local_oob_ext_data *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
			       cp, sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
				 read_local_oob_ext_data_complete);

	if (err < 0) {
		mgmt_pending_remove(cmd);
		return err;
	}

	return 0;
}

static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

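		/* Pick the address the peer will actually see during pairing:
		 * the static random address (type byte 0x01) when one is in
		 * use, otherwise the public address (type byte 0x00), per the
		 * LE Bluetooth Device Address AD type format.
		 */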
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}

static u32 get_supported_adv_flags(struct hci_dev *hdev)
{
	u32 flags = 0;

	flags |= MGMT_ADV_FLAG_CONNECTABLE;
	flags |= MGMT_ADV_FLAG_DISCOV;
	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
	flags |= MGMT_ADV_FLAG_APPEARANCE;
	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
	flags |= MGMT_ADV_PARAM_DURATION;
	flags |= MGMT_ADV_PARAM_TIMEOUT;
	flags |= MGMT_ADV_PARAM_INTERVALS;
	flags |= MGMT_ADV_PARAM_TX_POWER;
	flags |= MGMT_ADV_PARAM_SCAN_RSP;

	/* With extended advertising, the TX_POWER returned from Set Adv Param
	 * is always valid.
	 */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
		flags |= MGMT_ADV_FLAG_TX_POWER;

	if (ext_adv_capable(hdev)) {
		flags |= MGMT_ADV_FLAG_SEC_1M;
		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;

		if (le_2m_capable(hdev))
			flags |= MGMT_ADV_FLAG_SEC_2M;

		if (le_coded_capable(hdev))
			flags |= MGMT_ADV_FLAG_SEC_CODED;
	}

	return flags;
}

static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = max_adv_len(hdev);
	rp->max_scan_rsp_len = max_adv_len(hdev);
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}

static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */

	return eir_append_local_name(hdev, buf, 0);
}

static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
			   bool is_adv_data)
{
	u8 max_len = max_adv_len(hdev);

	if (is_adv_data) {
		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
				 MGMT_ADV_FLAG_LIMITED_DISCOV |
				 MGMT_ADV_FLAG_MANAGED_FLAGS))
			max_len -= 3;

		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
			max_len -= 3;
	} else {
		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
			max_len -= calculate_name_len(hdev);

		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
			max_len -= 4;
	}

	return max_len;
}
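
/* Worked example of the accounting above, assuming legacy advertising
 * (max_adv_len() == 31): requesting MGMT_ADV_FLAG_DISCOV and
 * MGMT_ADV_FLAG_TX_POWER reserves 3 bytes for the Flags field and 3 for
 * TX Power (length + type + one value byte each), leaving 31 - 3 - 3 =
 * 25 bytes for caller-supplied advertising data.
 */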

static bool flags_managed(u32 adv_flags)
{
	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
			    MGMT_ADV_FLAG_LIMITED_DISCOV |
			    MGMT_ADV_FLAG_MANAGED_FLAGS);
}

static bool tx_power_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
}

static bool name_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
}

static bool appearance_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
}

static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		if (!cur_len)
			continue;

		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;

		/* If the current field length would exceed the total data
		 * length, then it's invalid.
		 */
		if (i + cur_len >= len)
			return false;
	}

	return true;
}

static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
{
	u32 supported_flags, phy_flags;

	/* The current implementation only supports a subset of the specified
	 * flags. The sec (secondary PHY) flags also have to be mutually
	 * exclusive, i.e. at most one of them may be set.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
	if (adv_flags & ~supported_flags ||
	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
		return false;

	return true;
}
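
/* Illustrative sketch (not built): the bit trick used above. x & -x
 * isolates the lowest set bit of x, so x ^ (x & -x) is non-zero exactly
 * when x has two or more bits set. The classic equivalent idiom:
 */
#if 0
static bool at_most_one_bit_set(u32 x)
{
	return (x & (x - 1)) == 0;
}
#endif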

static bool adv_busy(struct hci_dev *hdev)
{
	return pending_find(MGMT_OP_SET_LE, hdev);
}

static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
			     int err)
{
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance;

		if (!adv->pending)
			continue;

		if (!err) {
			adv->pending = false;
			continue;
		}

		instance = adv->instance;

		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(sk, hdev, instance);
	}

	hci_dev_unlock(hdev);
}

static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_advertising *cp = cmd->param;
	struct mgmt_rp_add_advertising rp;

	memset(&rp, 0, sizeof(rp));

	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));

	add_adv_complete(hdev, cmd->sk, cp->instance, err);

	mgmt_pending_free(cmd);
}

static int add_advertising_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_advertising *cp = cmd->param;

	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}

static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure.
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}

static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;

	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
}

static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have at
	 * least the bare-minimum structure size known from when the interface
	 * was defined. Any extra parameters we don't know about will be
	 * ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In the new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
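
/* Usage note: registering an extended advertisement is a two-step flow.
 * A client first sends Add Extended Advertising Parameters (handled
 * above), whose reply carries the selected TX power plus the data and
 * scan-response space left by the chosen flags, and then follows up with
 * Add Extended Advertising Data (handled below) for the same instance to
 * supply the payloads and start the instance.
 */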

static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
	struct mgmt_rp_add_advertising rp;

	add_adv_complete(hdev, cmd->sk, cp->instance, err);

	memset(&rp, 0, sizeof(rp));

	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}

static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
	int err;

	if (ext_adv_capable(hdev)) {
		err = hci_update_adv_data_sync(hdev, cp->instance);
		if (err)
			return err;

		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
		if (err)
			return err;

		return hci_enable_ext_advertising_sync(hdev, cp->instance);
	}

	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}

static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In the new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating the data, so trigger the
	 * advertising_added event if this is an instance that wasn't
	 * previously advertising. If a failure occurs in the requests we
	 * initiated, the instance is removed again in
	 * add_ext_adv_data_complete.
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void remove_advertising_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_advertising *cp = cmd->param;
	struct mgmt_rp_remove_advertising rp;

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}

static int remove_advertising_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_advertising *cp = cmd->param;
	int err;

	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
	if (err)
		return err;

	if (list_empty(&hdev->adv_instances))
		err = hci_disable_advertising_sync(hdev);

	return err;
}

static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 flags, supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	/* The current implementation only supports a subset of the specified
	 * flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	if (flags & ~supported_flags)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}

static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version, MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands, MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list, MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info, MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered, MGMT_SETTING_SIZE },
	{ set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable, MGMT_SETTING_SIZE },
	{ set_fast_connectable, MGMT_SETTING_SIZE },
	{ set_bondable, MGMT_SETTING_SIZE },
	{ set_link_security, MGMT_SETTING_SIZE },
	{ set_ssp, MGMT_SETTING_SIZE },
	{ set_hs, MGMT_SETTING_SIZE },
	{ set_le, MGMT_SETTING_SIZE },
	{ set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid, MGMT_ADD_UUID_SIZE },
	{ remove_uuid, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect, MGMT_DISCONNECT_SIZE },
	{ get_connections, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name, MGMT_CONFIRM_NAME_SIZE },
	{ block_device, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising, MGMT_SETTING_SIZE },
	{ set_bredr, MGMT_SETTING_SIZE },
	{ set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn, MGMT_SETTING_SIZE },
	{ set_debug_keys, MGMT_SETTING_SIZE },
	{ set_privacy, MGMT_SET_PRIVACY_SIZE },
	{ load_irks, MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device, MGMT_ADD_DEVICE_SIZE },
	{ remove_device, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising, MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info, MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance, MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech, MGMT_SETTING_SIZE },
	{ read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor, MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
						MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send, MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
	{ mgmt_hci_cmd_sync, MGMT_HCI_CMD_SYNC_SIZE, HCI_MGMT_VAR_LEN },
};
9467
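/* Notify userspace that a new controller index has been registered.
 * Unconfigured controllers are announced via the unconfigured-index
 * event, configured ones via the regular index event; both cases are
 * additionally mirrored on the extended index event, which also
 * carries the bus type.
 */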
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
		return;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}

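/* Notify userspace that a controller index is going away. Any commands
 * still pending on this controller are completed with INVALID_INDEX
 * first, and remaining delayed work is cancelled once the events have
 * been sent.
 */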
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };

	if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
		return;

	mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
	cancel_delayed_work_sync(&hdev->mesh_send_done);
}

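/* Called when the power-on sequence has finished. On success, restart
 * any stored LE actions and the passive scan, then complete all pending
 * Set Powered commands and emit New Settings.
 */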
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}

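/* Complete all commands pending on a controller that is powering off.
 * A zeroed class of device is advertised if one was previously set,
 * since the controller no longer exposes any class while powered off.
 */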
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
			     &match);

	/* If the power off is because of hdev unregistration let's use
	 * the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
	else
		match.mgmt_status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct mgmt_pending_cmd *cmd;
	u8 status;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

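/* Forward a newly created BR/EDR link key to userspace. The store_hint
 * tells userspace whether the key should be kept across reboots or is
 * only valid for the lifetime of the current connection.
 */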
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_RESPONDER:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}

	return MGMT_LTK_UNAUTHENTICATED;
}

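/* Forward a newly distributed SMP Long Term Key to userspace. Keys from
 * peers that only use (non-)resolvable random addresses without an IRK
 * are flagged as not worth storing, since such an address will not be
 * seen again.
 */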
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't need their
	 * long term keys stored. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't need their
	 * signature resolving keys stored. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}

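/* Report updated LE connection parameters to userspace so they can be
 * persisted and loaded again via Load Connection Parameters. Only
 * identity addresses are reported; parameters tied to a resolvable
 * private address would be useless once the address rotates.
 */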
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

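/* Emit a Device Connected event. The HCI_CONN_MGMT_CONNECTED bit makes
 * sure the event is sent only once per connection; the variable-length
 * EIR payload carries either the LE advertising data or, for BR/EDR,
 * the remote name and class of device.
 */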
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* Allocate buffer for LE or BR/EDR adv */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) +
				     (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE,
						    name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class,
						    sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}

static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
}

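/* Check whether a power-down is in progress, either because the
 * HCI_POWERING_DOWN flag is set or because a pending Set Powered
 * command is about to turn the controller off.
 */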
bool mgmt_powering_down(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
		return true;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

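/* Emit a Device Disconnected event for connections that were previously
 * announced via Device Connected. Disconnects that happen while the
 * controller is suspended are reported with a dedicated reason so
 * userspace can tell them apart from remote-initiated ones.
 */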
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK &&
	    link_type != LE_LINK &&
	    link_type != BIS_LINK)
		return;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, true,
			     unpair_device_rsp, hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

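/* Report a failed connection attempt. If the connection had already
 * been announced to userspace, a Device Disconnected event is sent
 * instead of Connect Failed so that the two events stay balanced.
 */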
void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
					 conn->dst_type, status, true);
		return;
	}

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, false, sk_lookup,
			     &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, false, sk_lookup,
			     &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, false, sk_lookup,
			     &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS,
				   NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is an HCI command related to powering the
		 * HCI dev on or off, don't send any mgmt signals.
		 */
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
			return;

		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}

static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}

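/* Walk the EIR/advertising data and check whether any of the contained
 * 16-bit, 32-bit or 128-bit service UUIDs matches the given filter
 * list. Shorter UUIDs are expanded to 128 bits using the Bluetooth
 * base UUID before comparison.
 */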
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}

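/* Apply the Start Service Discovery filters to a single scan result:
 * first the RSSI threshold, then the UUID list against both the
 * advertising data and the scan response.
 */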
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If an RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * an RSSI smaller than the RSSI threshold will be dropped. If the
	 * quirk is set, let it through for further processing, as we might
	 * need to restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER)) {
		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}

void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.monitor_handle = cpu_to_le16(handle);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}

static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}

static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}

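/* Deliver a scan result to the mesh receiver. If an AD type filter has
 * been configured via Set Mesh Receiver, the advertising data and scan
 * response are scanned for at least one matching AD type before the
 * Mesh Device Found event is emitted.
 */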
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}

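/* Central entry point for scan results. Depending on the current state
 * the result is forwarded to the mesh receiver, dropped, filtered for
 * service discovery or limited discovery, and finally reported via
 * Device Found and/or Advertisement Monitor Device Found.
 */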
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel-initiated discovery. With
	 * LE the one exception is if we have pend_le_reports > 0, in
	 * which case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);

			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}

void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) +
			     (name ? eir_precalc_len(name_len) : 0));
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name,
					    name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}

void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	} else {
		memset(&ev.addr, 0, sizeof(ev.addr));
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}

static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};

int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}

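/* Called when a mgmt socket is closed. Abort all mesh transmissions
 * that were queued by this socket on any controller so no mesh packets
 * keep being sent on behalf of a closed owner.
 */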
void mgmt_cleanup(struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, true);
		} while (mesh_tx);
	}

	read_unlock(&hci_dev_list_lock);
}