/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <linux/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
#include "aosp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	23

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
	MGMT_OP_HCI_CMD_SYNC,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};

static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};

#define CACHE_TIMEOUT	secs_to_jiffies(2)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

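/* Convert a kernel errno into the closest MGMT status code. Errors
 * without a direct mapping fall back to MGMT_STATUS_FAILED.
 */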
static u8 mgmt_errno_status(int err)
{
	switch (err) {
	case 0:
		return MGMT_STATUS_SUCCESS;
	case -EPERM:
		return MGMT_STATUS_REJECTED;
	case -EINVAL:
		return MGMT_STATUS_INVALID_PARAMS;
	case -EOPNOTSUPP:
		return MGMT_STATUS_NOT_SUPPORTED;
	case -EBUSY:
		return MGMT_STATUS_BUSY;
	case -ETIMEDOUT:
		return MGMT_STATUS_AUTH_FAILED;
	case -ENOMEM:
		return MGMT_STATUS_NO_RESOURCES;
	case -EISCONN:
		return MGMT_STATUS_ALREADY_CONNECTED;
	case -ENOTCONN:
		return MGMT_STATUS_DISCONNECTED;
	}

	return MGMT_STATUS_FAILED;
}

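/* Convert an error into a MGMT status: negative values are treated as
 * kernel errnos, non-negative values as HCI status codes translated via
 * mgmt_status_table.
 */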
static u8 mgmt_status(int err)
{
	if (err < 0)
		return mgmt_errno_status(err);

	if (err < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[err];

	return MGMT_STATUS_FAILED;
}

static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}

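/* Translate a MGMT address type into the corresponding HCI LE address
 * type.
 */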
static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;
	else
		return ADDR_LE_DEV_RANDOM;
}

void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}

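/* Return the supported command and event lists. Untrusted sockets only
 * get to see the read-only subsets defined above.
 */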
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}

static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}

static bool is_configured(struct hci_dev *hdev)
{
	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if ((hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) ||
	     hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

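/* Report which configuration options (external config, public address)
 * still need to be set before the controller counts as configured.
 */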
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) ||
	     hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}

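/* Build the bitmask of PHYs the controller supports, derived from the
 * LMP features (BR/EDR) and the LE feature bits.
 */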
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}

static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}

static u32 get_configurable_phys(struct hci_dev *hdev)
{
	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}

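/* Build the bitmask of settings the controller is capable of, as opposed
 * to the currently active settings reported by get_current_settings().
 */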
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev))
			settings |= MGMT_SETTING_SSP;

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (hci_test_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) || hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (ll_privacy_capable(hdev))
		settings |= MGMT_SETTING_LL_PRIVACY;

	if (past_sender_capable(hdev))
		settings |= MGMT_SETTING_PAST_SENDER;

	if (past_receiver_capable(hdev))
		settings |= MGMT_SETTING_PAST_RECEIVER;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then whether
	 * the address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_enabled(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_enabled(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_enabled(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_enabled(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	if (ll_privacy_enabled(hdev))
		settings |= MGMT_SETTING_LL_PRIVACY;

	if (past_sender_enabled(hdev))
		settings |= MGMT_SETTING_PAST_SENDER;

	if (past_receiver_enabled(hdev))
		settings |= MGMT_SETTING_PAST_RECEIVER;

	return settings;
}

static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}

bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}

static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}

static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}

static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable advertising path, i.e. in
	 * hci_start_ext_adv_sync() or hci_enable_advertising_sync().
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}

static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data);

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);

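/* Complete a mesh transmission: notify user space of the finished handle
 * (unless silent) and remove the pending TX entry.
 */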
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}

static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	if (list_empty(&hdev->adv_instances))
		hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}

static int mesh_send_sync(struct hci_dev *hdev, void *data);
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);

static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}

static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}

static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set implicitly so that
	 * pairing works for them. For mgmt, however, we require user space
	 * to explicitly enable it.
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}

static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}

static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}

static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}

void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for the AUTO_OFF case where the controller
		 * might not "really" have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}

static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_settings() for power on, as power off is
		 * deferred to the hdev->power_off work which does call
		 * hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_free(cmd);
}

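/* Runs from the hci_cmd_sync queue. The parameters are copied out under
 * mgmt_pending_lock since the pending command may be cancelled and freed
 * concurrently, hence the listed check before touching it.
 */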
static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode cp;

	mutex_lock(&hdev->mgmt_pending_lock);

	/* Make sure cmd still outstanding. */
	if (!__mgmt_pending_listed(hdev, cmd)) {
		mutex_unlock(&hdev->mgmt_pending_lock);
		return -ECANCELED;
	}

	memcpy(&cp, cmd->param, sizeof(cp));

	mutex_unlock(&hdev->mgmt_pending_lock);

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp.val);
}

static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!cp->val) {
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
					      MGMT_STATUS_BUSY);
			goto failed;
		}
	}

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, *status);
}

static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	/* dequeue cmd_sync entries using cmd as data as that is about to be
	 * removed/freed.
	 */
	hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);

	if (cmd->cmd_complete) {
		cmd->cmd_complete(cmd, match->mgmt_status);
		return;
	}

	cmd_status_rsp(cmd, data);
}

static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}

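/* Check whether BR/EDR is both supported by the controller and currently
 * enabled, returning the matching MGMT status.
 */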
static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = secs_to_jiffies(hdev->discov_timeout);

		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
	hci_dev_unlock(hdev);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	if (!mgmt_pending_listed(hdev, data))
		return -ECANCELED;

	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}

static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * with the new value. And if only the timeout gets updated, no
	 * HCI transactions are needed.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = secs_to_jiffies(hdev->discov_timeout);

			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

mgmt_set_connectable_complete(struct hci_dev * hdev,void * data,int err)1705 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1706 int err)
1707 {
1708 struct mgmt_pending_cmd *cmd = data;
1709
1710 bt_dev_dbg(hdev, "err %d", err);
1711
1712 /* Make sure cmd still outstanding. */
1713 if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
1714 return;
1715
1716 hci_dev_lock(hdev);
1717
1718 if (err) {
1719 u8 mgmt_err = mgmt_status(err);
1720 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
1721 goto done;
1722 }
1723
1724 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1725 new_settings(hdev, cmd->sk);
1726
1727 done:
1728 mgmt_pending_free(cmd);
1729
1730 hci_dev_unlock(hdev);
1731 }
1732
set_connectable_update_settings(struct hci_dev * hdev,struct sock * sk,u8 val)1733 static int set_connectable_update_settings(struct hci_dev *hdev,
1734 struct sock *sk, u8 val)
1735 {
1736 bool changed = false;
1737 int err;
1738
1739 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1740 changed = true;
1741
1742 if (val) {
1743 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1744 } else {
1745 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1746 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1747 }
1748
1749 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1750 if (err < 0)
1751 return err;
1752
1753 if (changed) {
1754 hci_update_scan(hdev);
1755 hci_update_passive_scan(hdev);
1756 return new_settings(hdev, sk);
1757 }
1758
1759 return 0;
1760 }
1761
set_connectable_sync(struct hci_dev * hdev,void * data)1762 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1763 {
1764 if (!mgmt_pending_listed(hdev, data))
1765 return -ECANCELED;
1766
1767 BT_DBG("%s", hdev->name);
1768
1769 return hci_update_connectable_sync(hdev);
1770 }
1771
set_connectable(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)1772 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1773 u16 len)
1774 {
1775 struct mgmt_mode *cp = data;
1776 struct mgmt_pending_cmd *cmd;
1777 int err;
1778
1779 bt_dev_dbg(hdev, "sock %p", sk);
1780
1781 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1782 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1783 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1784 MGMT_STATUS_REJECTED);
1785
1786 if (cp->val != 0x00 && cp->val != 0x01)
1787 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1788 MGMT_STATUS_INVALID_PARAMS);
1789
1790 hci_dev_lock(hdev);
1791
1792 if (!hdev_is_powered(hdev)) {
1793 err = set_connectable_update_settings(hdev, sk, cp->val);
1794 goto failed;
1795 }
1796
1797 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1798 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1799 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1800 MGMT_STATUS_BUSY);
1801 goto failed;
1802 }
1803
1804 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1805 if (!cmd) {
1806 err = -ENOMEM;
1807 goto failed;
1808 }
1809
1810 if (cp->val) {
1811 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1812 } else {
1813 if (hdev->discov_timeout > 0)
1814 cancel_delayed_work(&hdev->discov_off);
1815
1816 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1817 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1818 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1819 }
1820
1821 err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1822 mgmt_set_connectable_complete);
1823
1824 if (err < 0)
1825 mgmt_pending_remove(cmd);
1826
1827 failed:
1828 hci_dev_unlock(hdev);
1829 return err;
1830 }
1831
1832 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1833 u16 len)
1834 {
1835 struct mgmt_mode *cp = data;
1836 bool changed;
1837 int err;
1838
1839 bt_dev_dbg(hdev, "sock %p", sk);
1840
1841 if (cp->val != 0x00 && cp->val != 0x01)
1842 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1843 MGMT_STATUS_INVALID_PARAMS);
1844
1845 hci_dev_lock(hdev);
1846
1847 if (cp->val)
1848 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1849 else
1850 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1851
1852 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1853 if (err < 0)
1854 goto unlock;
1855
1856 if (changed) {
1857 /* In limited privacy mode the change of bondable mode
1858 * may affect the local advertising address.
1859 */
1860 hci_update_discoverable(hdev);
1861
1862 err = new_settings(hdev, sk);
1863 }
1864
1865 unlock:
1866 hci_dev_unlock(hdev);
1867 return err;
1868 }
1869
1870 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1871 u16 len)
1872 {
1873 struct mgmt_mode *cp = data;
1874 struct mgmt_pending_cmd *cmd;
1875 u8 val, status;
1876 int err;
1877
1878 bt_dev_dbg(hdev, "sock %p", sk);
1879
1880 status = mgmt_bredr_support(hdev);
1881 if (status)
1882 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1883 status);
1884
1885 if (cp->val != 0x00 && cp->val != 0x01)
1886 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1887 MGMT_STATUS_INVALID_PARAMS);
1888
1889 hci_dev_lock(hdev);
1890
1891 if (!hdev_is_powered(hdev)) {
1892 bool changed = false;
1893
1894 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1895 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1896 changed = true;
1897 }
1898
1899 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1900 if (err < 0)
1901 goto failed;
1902
1903 if (changed)
1904 err = new_settings(hdev, sk);
1905
1906 goto failed;
1907 }
1908
1909 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1910 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1911 MGMT_STATUS_BUSY);
1912 goto failed;
1913 }
1914
1915 val = !!cp->val;
1916
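/* If the controller's authentication state already matches the requested
 * value, skip the HCI command and just reply with the current settings.
 */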
1917 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1918 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1919 goto failed;
1920 }
1921
1922 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1923 if (!cmd) {
1924 err = -ENOMEM;
1925 goto failed;
1926 }
1927
1928 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1929 if (err < 0) {
1930 mgmt_pending_remove(cmd);
1931 goto failed;
1932 }
1933
1934 failed:
1935 hci_dev_unlock(hdev);
1936 return err;
1937 }
1938
1939 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1940 {
1941 struct cmd_lookup match = { NULL, hdev };
1942 struct mgmt_pending_cmd *cmd = data;
1943 struct mgmt_mode *cp;
1944 u8 enable;
1945 bool changed;
1946
1947 /* Make sure cmd still outstanding. */
1948 if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
1949 return;
1950
1951 cp = cmd->param;
1952 enable = cp->val;
1953
1954 if (err) {
1955 u8 mgmt_err = mgmt_status(err);
1956
1957 if (enable && hci_dev_test_and_clear_flag(hdev,
1958 HCI_SSP_ENABLED)) {
1959 new_settings(hdev, NULL);
1960 }
1961
1962 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
1963 return;
1964 }
1965
1966 if (enable) {
1967 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1968 } else {
1969 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1970 }
1971
1972 settings_rsp(cmd, &match);
1973
1974 if (changed)
1975 new_settings(hdev, match.sk);
1976
1977 if (match.sk)
1978 sock_put(match.sk);
1979
1980 hci_update_eir_sync(hdev);
1981 }
1982
1983 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1984 {
1985 struct mgmt_pending_cmd *cmd = data;
1986 struct mgmt_mode cp;
1987 bool changed = false;
1988 int err;
1989
1990 mutex_lock(&hdev->mgmt_pending_lock);
1991
1992 if (!__mgmt_pending_listed(hdev, cmd)) {
1993 mutex_unlock(&hdev->mgmt_pending_lock);
1994 return -ECANCELED;
1995 }
1996
1997 memcpy(&cp, cmd->param, sizeof(cp));
1998
1999 mutex_unlock(&hdev->mgmt_pending_lock);
2000
2001 if (cp.val)
2002 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
2003
2004 err = hci_write_ssp_mode_sync(hdev, cp.val);
2005
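/* HCI_SSP_ENABLED was set optimistically above so that the write runs
 * with SSP logically on. On success, drop it again here so that
 * set_ssp_complete() can observe the transition and emit the new
 * settings event; on failure the error path of set_ssp_complete()
 * clears the flag instead.
 */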
2006 if (!err && changed)
2007 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
2008
2009 return err;
2010 }
2011
2012 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2013 {
2014 struct mgmt_mode *cp = data;
2015 struct mgmt_pending_cmd *cmd;
2016 u8 status;
2017 int err;
2018
2019 bt_dev_dbg(hdev, "sock %p", sk);
2020
2021 status = mgmt_bredr_support(hdev);
2022 if (status)
2023 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2024
2025 if (!lmp_ssp_capable(hdev))
2026 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2027 MGMT_STATUS_NOT_SUPPORTED);
2028
2029 if (cp->val != 0x00 && cp->val != 0x01)
2030 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2031 MGMT_STATUS_INVALID_PARAMS);
2032
2033 hci_dev_lock(hdev);
2034
2035 if (!hdev_is_powered(hdev)) {
2036 bool changed;
2037
2038 if (cp->val) {
2039 changed = !hci_dev_test_and_set_flag(hdev,
2040 HCI_SSP_ENABLED);
2041 } else {
2042 changed = hci_dev_test_and_clear_flag(hdev,
2043 HCI_SSP_ENABLED);
2044 }
2045
2046 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2047 if (err < 0)
2048 goto failed;
2049
2050 if (changed)
2051 err = new_settings(hdev, sk);
2052
2053 goto failed;
2054 }
2055
2056 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2057 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2058 MGMT_STATUS_BUSY);
2059 goto failed;
2060 }
2061
2062 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2063 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2064 goto failed;
2065 }
2066
2067 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2068 if (!cmd)
2069 err = -ENOMEM;
2070 else
2071 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2072 set_ssp_complete);
2073
2074 if (err < 0) {
2075 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2076 MGMT_STATUS_FAILED);
2077
2078 if (cmd)
2079 mgmt_pending_remove(cmd);
2080 }
2081
2082 failed:
2083 hci_dev_unlock(hdev);
2084 return err;
2085 }
2086
2087 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2088 {
2089 bt_dev_dbg(hdev, "sock %p", sk);
2090
2091 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2092 MGMT_STATUS_NOT_SUPPORTED);
2093 }
2094
2095 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2096 {
2097 struct mgmt_pending_cmd *cmd = data;
2098 struct cmd_lookup match = { NULL, hdev };
2099 u8 status = mgmt_status(err);
2100
2101 bt_dev_dbg(hdev, "err %d", err);
2102
2103 if (err == -ECANCELED || !mgmt_pending_valid(hdev, data))
2104 return;
2105
2106 if (status) {
2107 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, status);
2108 goto done;
2109 }
2110
2111 settings_rsp(cmd, &match);
2112
2113 new_settings(hdev, match.sk);
2114
2115 if (match.sk)
2116 sock_put(match.sk);
2117
2118 done:
2119 mgmt_pending_free(cmd);
2120 }
2121
2122 static int set_le_sync(struct hci_dev *hdev, void *data)
2123 {
2124 struct mgmt_pending_cmd *cmd = data;
2125 struct mgmt_mode cp;
2126 u8 val;
2127 int err;
2128
2129 mutex_lock(&hdev->mgmt_pending_lock);
2130
2131 if (!__mgmt_pending_listed(hdev, cmd)) {
2132 mutex_unlock(&hdev->mgmt_pending_lock);
2133 return -ECANCELED;
2134 }
2135
2136 memcpy(&cp, cmd->param, sizeof(cp));
2137 val = !!cp.val;
2138
2139 mutex_unlock(&hdev->mgmt_pending_lock);
2140
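/* Disabling LE tears down all advertising instances before the host LE
 * support setting is rewritten.
 */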
2141 if (!val) {
2142 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2143
2144 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2145 hci_disable_advertising_sync(hdev);
2146
2147 if (ext_adv_capable(hdev))
2148 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2149 } else {
2150 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2151 }
2152
2153 err = hci_write_le_host_supported_sync(hdev, val, 0);
2154
2155 /* Make sure the controller has a good default for
2156 * advertising data. Restrict the update to when LE
2157 * has actually been enabled. During power on, the
2158 * update in powered_update_hci will take care of it.
2159 */
2160 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2161 if (ext_adv_capable(hdev)) {
2162 int status;
2163
2164 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2165 if (!status)
2166 hci_update_scan_rsp_data_sync(hdev, 0x00);
2167 } else {
2168 hci_update_adv_data_sync(hdev, 0x00);
2169 hci_update_scan_rsp_data_sync(hdev, 0x00);
2170 }
2171
2172 hci_update_passive_scan(hdev);
2173 }
2174
2175 return err;
2176 }
2177
2178 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2179 {
2180 struct mgmt_pending_cmd *cmd = data;
2181 u8 status = mgmt_status(err);
2182 struct sock *sk;
2183
2184 if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
2185 return;
2186
2187 sk = cmd->sk;
2188
2189 if (status) {
2190 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2191 status);
2192 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, true,
2193 cmd_status_rsp, &status);
2194 goto done;
2195 }
2196
2197 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2198
2199 done:
2200 mgmt_pending_free(cmd);
2201 }
2202
2203 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2204 {
2205 struct mgmt_pending_cmd *cmd = data;
2206 DEFINE_FLEX(struct mgmt_cp_set_mesh, cp, ad_types, num_ad_types,
2207 sizeof(hdev->mesh_ad_types));
2208 size_t len;
2209
2210 mutex_lock(&hdev->mgmt_pending_lock);
2211
2212 if (!__mgmt_pending_listed(hdev, cmd)) {
2213 mutex_unlock(&hdev->mgmt_pending_lock);
2214 return -ECANCELED;
2215 }
2216
2217 len = cmd->param_len;
2218 memcpy(cp, cmd->param, min(__struct_size(cp), len));
2219
2220 mutex_unlock(&hdev->mgmt_pending_lock);
2221
2222 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2223
2224 if (cp->enable)
2225 hci_dev_set_flag(hdev, HCI_MESH);
2226 else
2227 hci_dev_clear_flag(hdev, HCI_MESH);
2228
2229 hdev->le_scan_interval = __le16_to_cpu(cp->period);
2230 hdev->le_scan_window = __le16_to_cpu(cp->window);
2231
2232 len -= sizeof(struct mgmt_cp_set_mesh);
2233
2234 /* If filters don't fit, forward all adv pkts */
2235 if (len <= sizeof(hdev->mesh_ad_types))
2236 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2237
2238 hci_update_passive_scan_sync(hdev);
2239 return 0;
2240 }
2241
2242 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2243 {
2244 struct mgmt_cp_set_mesh *cp = data;
2245 struct mgmt_pending_cmd *cmd;
2246 __u16 period, window;
2247 int err = 0;
2248
2249 bt_dev_dbg(hdev, "sock %p", sk);
2250
2251 if (!lmp_le_capable(hdev) ||
2252 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2253 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2254 MGMT_STATUS_NOT_SUPPORTED);
2255
2256 if (cp->enable != 0x00 && cp->enable != 0x01)
2257 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2258 MGMT_STATUS_INVALID_PARAMS);
2259
2260 /* Keep allowed ranges in sync with set_scan_params() */
2261 period = __le16_to_cpu(cp->period);
2262
2263 if (period < 0x0004 || period > 0x4000)
2264 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2265 MGMT_STATUS_INVALID_PARAMS);
2266
2267 window = __le16_to_cpu(cp->window);
2268
2269 if (window < 0x0004 || window > 0x4000)
2270 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2271 MGMT_STATUS_INVALID_PARAMS);
2272
2273 if (window > period)
2274 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2275 MGMT_STATUS_INVALID_PARAMS);
2276
2277 hci_dev_lock(hdev);
2278
2279 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2280 if (!cmd)
2281 err = -ENOMEM;
2282 else
2283 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2284 set_mesh_complete);
2285
2286 if (err < 0) {
2287 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2288 MGMT_STATUS_FAILED);
2289
2290 if (cmd)
2291 mgmt_pending_remove(cmd);
2292 }
2293
2294 hci_dev_unlock(hdev);
2295 return err;
2296 }
2297
2298 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2299 {
2300 struct mgmt_mesh_tx *mesh_tx = data;
2301 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2302 unsigned long mesh_send_interval;
2303 u8 mgmt_err = mgmt_status(err);
2304
2305 /* Report any errors here, but don't report completion */
2306
2307 if (mgmt_err) {
2308 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2309 /* Send Complete Error Code for handle */
2310 mesh_send_complete(hdev, mesh_tx, false);
2311 return;
2312 }
2313
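/* Allow roughly 25 ms per requested transmission before the send-done
 * work fires.
 */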
2314 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2315 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2316 mesh_send_interval);
2317 }
2318
2319 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2320 {
2321 struct mgmt_mesh_tx *mesh_tx = data;
2322 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2323 struct adv_info *adv, *next_instance;
2324 u8 instance = hdev->le_num_of_adv_sets + 1;
2325 u16 timeout, duration;
2326 int err = 0;
2327
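/* Mesh transmissions use a pseudo advertising instance one past the
 * controller's real advertising sets; if all real sets are already in
 * use there is no room to transmit.
 */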
2328 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2329 return MGMT_STATUS_BUSY;
2330
2331 timeout = 1000;
2332 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2333 adv = hci_add_adv_instance(hdev, instance, 0,
2334 send->adv_data_len, send->adv_data,
2335 0, NULL,
2336 timeout, duration,
2337 HCI_ADV_TX_POWER_NO_PREFERENCE,
2338 hdev->le_adv_min_interval,
2339 hdev->le_adv_max_interval,
2340 mesh_tx->handle);
2341
2342 if (!IS_ERR(adv))
2343 mesh_tx->instance = instance;
2344 else
2345 err = PTR_ERR(adv);
2346
2347 if (hdev->cur_adv_instance == instance) {
2348 /* If the currently advertised instance is being changed then
2349 * cancel the current advertising and schedule the next
2350 * instance. If there is only one instance then the overridden
2351 * advertising data will be visible right away.
2352 */
2353 cancel_adv_timeout(hdev);
2354
2355 next_instance = hci_get_next_instance(hdev, instance);
2356 if (next_instance)
2357 instance = next_instance->instance;
2358 else
2359 instance = 0;
2360 } else if (hdev->adv_instance_timeout) {
2361 /* Immediately advertise the new instance if no other is active,
2362 * or let it come up from the queue if advertising is in progress.
2363 */
2364 instance = 0;
2365 }
2366
2367 if (instance)
2368 return hci_schedule_adv_instance_sync(hdev, instance, true);
2369
2370 return err;
2371 }
2372
2373 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2374 {
2375 struct mgmt_rp_mesh_read_features *rp = data;
2376
2377 if (rp->used_handles >= rp->max_handles)
2378 return;
2379
2380 rp->handles[rp->used_handles++] = mesh_tx->handle;
2381 }
2382
2383 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2384 void *data, u16 len)
2385 {
2386 struct mgmt_rp_mesh_read_features rp;
2387
2388 if (!lmp_le_capable(hdev) ||
2389 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2390 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2391 MGMT_STATUS_NOT_SUPPORTED);
2392
2393 memset(&rp, 0, sizeof(rp));
2394 rp.index = cpu_to_le16(hdev->id);
2395 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2396 rp.max_handles = MESH_HANDLES_MAX;
2397
2398 hci_dev_lock(hdev);
2399
2400 if (rp.max_handles)
2401 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2402
2403 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2404 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2405
2406 hci_dev_unlock(hdev);
2407 return 0;
2408 }
2409
2410 static int send_cancel(struct hci_dev *hdev, void *data)
2411 {
2412 struct mgmt_pending_cmd *cmd = data;
2413 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2414 struct mgmt_mesh_tx *mesh_tx;
2415
2416 if (!cancel->handle) {
2417 do {
2418 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2419
2420 if (mesh_tx)
2421 mesh_send_complete(hdev, mesh_tx, false);
2422 } while (mesh_tx);
2423 } else {
2424 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2425
2426 if (mesh_tx && mesh_tx->sk == cmd->sk)
2427 mesh_send_complete(hdev, mesh_tx, false);
2428 }
2429
2430 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2431 0, NULL, 0);
2432 mgmt_pending_free(cmd);
2433
2434 return 0;
2435 }
2436
2437 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2438 void *data, u16 len)
2439 {
2440 struct mgmt_pending_cmd *cmd;
2441 int err;
2442
2443 if (!lmp_le_capable(hdev) ||
2444 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2445 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2446 MGMT_STATUS_NOT_SUPPORTED);
2447
2448 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2449 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2450 MGMT_STATUS_REJECTED);
2451
2452 hci_dev_lock(hdev);
2453 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2454 if (!cmd)
2455 err = -ENOMEM;
2456 else
2457 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2458
2459 if (err < 0) {
2460 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2461 MGMT_STATUS_FAILED);
2462
2463 if (cmd)
2464 mgmt_pending_free(cmd);
2465 }
2466
2467 hci_dev_unlock(hdev);
2468 return err;
2469 }
2470
2471 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2472 {
2473 struct mgmt_mesh_tx *mesh_tx;
2474 struct mgmt_cp_mesh_send *send = data;
2475 struct mgmt_rp_mesh_read_features rp;
2476 bool sending;
2477 int err = 0;
2478
2479 if (!lmp_le_capable(hdev) ||
2480 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2481 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2482 MGMT_STATUS_NOT_SUPPORTED);
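/* The payload must carry at least one byte of advertising data and at
 * most 31 bytes, the legacy advertising PDU limit.
 */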
2483 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2484 len <= MGMT_MESH_SEND_SIZE ||
2485 len > (MGMT_MESH_SEND_SIZE + 31))
2486 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2487 MGMT_STATUS_REJECTED);
2488
2489 hci_dev_lock(hdev);
2490
2491 memset(&rp, 0, sizeof(rp));
2492 rp.max_handles = MESH_HANDLES_MAX;
2493
2494 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2495
2496 if (rp.max_handles <= rp.used_handles) {
2497 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2498 MGMT_STATUS_BUSY);
2499 goto done;
2500 }
2501
2502 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2503 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2504
2505 if (!mesh_tx)
2506 err = -ENOMEM;
2507 else if (!sending)
2508 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2509 mesh_send_start_complete);
2510
2511 if (err < 0) {
2512 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2513 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2514 MGMT_STATUS_FAILED);
2515
2516 if (mesh_tx) {
2517 if (sending)
2518 mgmt_mesh_remove(mesh_tx);
2519 }
2520 } else {
2521 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2522
2523 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2524 &mesh_tx->handle, 1);
2525 }
2526
2527 done:
2528 hci_dev_unlock(hdev);
2529 return err;
2530 }
2531
2532 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2533 {
2534 struct mgmt_mode *cp = data;
2535 struct mgmt_pending_cmd *cmd;
2536 int err;
2537 u8 val, enabled;
2538
2539 bt_dev_dbg(hdev, "sock %p", sk);
2540
2541 if (!lmp_le_capable(hdev))
2542 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2543 MGMT_STATUS_NOT_SUPPORTED);
2544
2545 if (cp->val != 0x00 && cp->val != 0x01)
2546 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2547 MGMT_STATUS_INVALID_PARAMS);
2548
2549 /* Bluetooth single-mode LE-only controllers, and dual-mode
2550 * controllers configured as LE-only devices, do not allow
2551 * switching LE off: they either have LE enabled explicitly
2552 * or have had BR/EDR switched off previously.
2553 *
2554 * When trying to enable LE while it is already enabled,
2555 * gracefully send a positive response. Trying to disable it,
2556 * however, results in rejection.
2557 */
2558 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2559 if (cp->val == 0x01)
2560 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2561
2562 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2563 MGMT_STATUS_REJECTED);
2564 }
2565
2566 hci_dev_lock(hdev);
2567
2568 val = !!cp->val;
2569 enabled = lmp_host_le_capable(hdev);
2570
2571 if (!hdev_is_powered(hdev) || val == enabled) {
2572 bool changed = false;
2573
2574 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2575 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2576 changed = true;
2577 }
2578
2579 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2580 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2581 changed = true;
2582 }
2583
2584 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2585 if (err < 0)
2586 goto unlock;
2587
2588 if (changed)
2589 err = new_settings(hdev, sk);
2590
2591 goto unlock;
2592 }
2593
2594 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2595 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2596 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2597 MGMT_STATUS_BUSY);
2598 goto unlock;
2599 }
2600
2601 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2602 if (!cmd)
2603 err = -ENOMEM;
2604 else
2605 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2606 set_le_complete);
2607
2608 if (err < 0) {
2609 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2610 MGMT_STATUS_FAILED);
2611
2612 if (cmd)
2613 mgmt_pending_remove(cmd);
2614 }
2615
2616 unlock:
2617 hci_dev_unlock(hdev);
2618 return err;
2619 }
2620
2621 static int send_hci_cmd_sync(struct hci_dev *hdev, void *data)
2622 {
2623 struct mgmt_pending_cmd *cmd = data;
2624 struct mgmt_cp_hci_cmd_sync *cp = cmd->param;
2625 struct sk_buff *skb;
2626
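/* Forward the raw HCI command from userspace, waiting either for the
 * caller-specified event or for the default command timeout.
 */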
2627 skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cp->opcode),
2628 le16_to_cpu(cp->params_len), cp->params,
2629 cp->event, cp->timeout ?
2630 secs_to_jiffies(cp->timeout) :
2631 HCI_CMD_TIMEOUT);
2632 if (IS_ERR(skb)) {
2633 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2634 mgmt_status(PTR_ERR(skb)));
2635 goto done;
2636 }
2637
2638 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC, 0,
2639 skb->data, skb->len);
2640
2641 kfree_skb(skb);
2642
2643 done:
2644 mgmt_pending_free(cmd);
2645
2646 return 0;
2647 }
2648
2649 static int mgmt_hci_cmd_sync(struct sock *sk, struct hci_dev *hdev,
2650 void *data, u16 len)
2651 {
2652 struct mgmt_cp_hci_cmd_sync *cp = data;
2653 struct mgmt_pending_cmd *cmd;
2654 int err;
2655
2656 if (len != (offsetof(struct mgmt_cp_hci_cmd_sync, params) +
2657 le16_to_cpu(cp->params_len)))
2658 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2659 MGMT_STATUS_INVALID_PARAMS);
2660
2661 hci_dev_lock(hdev);
2662 cmd = mgmt_pending_new(sk, MGMT_OP_HCI_CMD_SYNC, hdev, data, len);
2663 if (!cmd)
2664 err = -ENOMEM;
2665 else
2666 err = hci_cmd_sync_queue(hdev, send_hci_cmd_sync, cmd, NULL);
2667
2668 if (err < 0) {
2669 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2670 MGMT_STATUS_FAILED);
2671
2672 if (cmd)
2673 mgmt_pending_free(cmd);
2674 }
2675
2676 hci_dev_unlock(hdev);
2677 return err;
2678 }
2679
2680 /* This is a helper function to test for pending mgmt commands that can
2681 * cause CoD or EIR HCI commands. We can only allow one such pending
2682 * mgmt command at a time since otherwise we cannot easily track what
2683 * the current values are, will be, and based on that calculate if a new
2684 * HCI command needs to be sent and if yes with what value.
2685 */
2686 static bool pending_eir_or_class(struct hci_dev *hdev)
2687 {
2688 struct mgmt_pending_cmd *cmd;
2689
2690 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2691 switch (cmd->opcode) {
2692 case MGMT_OP_ADD_UUID:
2693 case MGMT_OP_REMOVE_UUID:
2694 case MGMT_OP_SET_DEV_CLASS:
2695 case MGMT_OP_SET_POWERED:
2696 return true;
2697 }
2698 }
2699
2700 return false;
2701 }
2702
2703 static const u8 bluetooth_base_uuid[] = {
2704 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2705 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2706 };
2707
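/* 16-bit and 32-bit SIG-assigned UUIDs are shorthand for a slice of the
 * Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB, stored above
 * in little-endian order). Any UUID whose low twelve bytes match the base
 * carries its short form in the remaining four bytes.
 */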
2708 static u8 get_uuid_size(const u8 *uuid)
2709 {
2710 u32 val;
2711
2712 if (memcmp(uuid, bluetooth_base_uuid, 12))
2713 return 128;
2714
2715 val = get_unaligned_le32(&uuid[12]);
2716 if (val > 0xffff)
2717 return 32;
2718
2719 return 16;
2720 }
2721
2722 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2723 {
2724 struct mgmt_pending_cmd *cmd = data;
2725
2726 bt_dev_dbg(hdev, "err %d", err);
2727
2728 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
2729 mgmt_status(err), hdev->dev_class, 3);
2730
2731 mgmt_pending_free(cmd);
2732 }
2733
2734 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2735 {
2736 int err;
2737
2738 err = hci_update_class_sync(hdev);
2739 if (err)
2740 return err;
2741
2742 return hci_update_eir_sync(hdev);
2743 }
2744
2745 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2746 {
2747 struct mgmt_cp_add_uuid *cp = data;
2748 struct mgmt_pending_cmd *cmd;
2749 struct bt_uuid *uuid;
2750 int err;
2751
2752 bt_dev_dbg(hdev, "sock %p", sk);
2753
2754 hci_dev_lock(hdev);
2755
2756 if (pending_eir_or_class(hdev)) {
2757 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2758 MGMT_STATUS_BUSY);
2759 goto failed;
2760 }
2761
2762 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2763 if (!uuid) {
2764 err = -ENOMEM;
2765 goto failed;
2766 }
2767
2768 memcpy(uuid->uuid, cp->uuid, 16);
2769 uuid->svc_hint = cp->svc_hint;
2770 uuid->size = get_uuid_size(cp->uuid);
2771
2772 list_add_tail(&uuid->list, &hdev->uuids);
2773
2774 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2775 if (!cmd) {
2776 err = -ENOMEM;
2777 goto failed;
2778 }
2779
2780 /* MGMT_OP_ADD_UUID doesn't require the adapter to be up and running,
2781 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2782 */
2783 err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
2784 mgmt_class_complete);
2785 if (err < 0) {
2786 mgmt_pending_free(cmd);
2787 goto failed;
2788 }
2789
2790 failed:
2791 hci_dev_unlock(hdev);
2792 return err;
2793 }
2794
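/* Arm the service cache timer so that bursts of UUID removals coalesce
 * into a single class/EIR update. Returns true if the timer was newly
 * armed and the immediate update can therefore be skipped.
 */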
2795 static bool enable_service_cache(struct hci_dev *hdev)
2796 {
2797 if (!hdev_is_powered(hdev))
2798 return false;
2799
2800 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2801 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2802 CACHE_TIMEOUT);
2803 return true;
2804 }
2805
2806 return false;
2807 }
2808
2809 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2810 {
2811 int err;
2812
2813 err = hci_update_class_sync(hdev);
2814 if (err)
2815 return err;
2816
2817 return hci_update_eir_sync(hdev);
2818 }
2819
2820 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2821 u16 len)
2822 {
2823 struct mgmt_cp_remove_uuid *cp = data;
2824 struct mgmt_pending_cmd *cmd;
2825 struct bt_uuid *match, *tmp;
2826 static const u8 bt_uuid_any[] = {
2827 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2828 };
2829 int err, found;
2830
2831 bt_dev_dbg(hdev, "sock %p", sk);
2832
2833 hci_dev_lock(hdev);
2834
2835 if (pending_eir_or_class(hdev)) {
2836 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2837 MGMT_STATUS_BUSY);
2838 goto unlock;
2839 }
2840
2841 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2842 hci_uuids_clear(hdev);
2843
2844 if (enable_service_cache(hdev)) {
2845 err = mgmt_cmd_complete(sk, hdev->id,
2846 MGMT_OP_REMOVE_UUID,
2847 0, hdev->dev_class, 3);
2848 goto unlock;
2849 }
2850
2851 goto update_class;
2852 }
2853
2854 found = 0;
2855
2856 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2857 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2858 continue;
2859
2860 list_del(&match->list);
2861 kfree(match);
2862 found++;
2863 }
2864
2865 if (found == 0) {
2866 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2867 MGMT_STATUS_INVALID_PARAMS);
2868 goto unlock;
2869 }
2870
2871 update_class:
2872 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2873 if (!cmd) {
2874 err = -ENOMEM;
2875 goto unlock;
2876 }
2877
2878 /* MGMT_OP_REMOVE_UUID doesn't require the adapter to be up and
2879 * running, so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2880 */
2881 err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
2882 mgmt_class_complete);
2883 if (err < 0)
2884 mgmt_pending_free(cmd);
2885
2886 unlock:
2887 hci_dev_unlock(hdev);
2888 return err;
2889 }
2890
2891 static int set_class_sync(struct hci_dev *hdev, void *data)
2892 {
2893 int err = 0;
2894
2895 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2896 cancel_delayed_work_sync(&hdev->service_cache);
2897 err = hci_update_eir_sync(hdev);
2898 }
2899
2900 if (err)
2901 return err;
2902
2903 return hci_update_class_sync(hdev);
2904 }
2905
2906 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2907 u16 len)
2908 {
2909 struct mgmt_cp_set_dev_class *cp = data;
2910 struct mgmt_pending_cmd *cmd;
2911 int err;
2912
2913 bt_dev_dbg(hdev, "sock %p", sk);
2914
2915 if (!lmp_bredr_capable(hdev))
2916 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2917 MGMT_STATUS_NOT_SUPPORTED);
2918
2919 hci_dev_lock(hdev);
2920
2921 if (pending_eir_or_class(hdev)) {
2922 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2923 MGMT_STATUS_BUSY);
2924 goto unlock;
2925 }
2926
2927 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2928 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2929 MGMT_STATUS_INVALID_PARAMS);
2930 goto unlock;
2931 }
2932
2933 hdev->major_class = cp->major;
2934 hdev->minor_class = cp->minor;
2935
2936 if (!hdev_is_powered(hdev)) {
2937 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2938 hdev->dev_class, 3);
2939 goto unlock;
2940 }
2941
2942 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2943 if (!cmd) {
2944 err = -ENOMEM;
2945 goto unlock;
2946 }
2947
2948 /* MGMT_OP_SET_DEV_CLASS doesn't require the adapter to be up and
2949 * running, so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2950 */
2951 err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
2952 mgmt_class_complete);
2953 if (err < 0)
2954 mgmt_pending_free(cmd);
2955
2956 unlock:
2957 hci_dev_unlock(hdev);
2958 return err;
2959 }
2960
2961 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2962 u16 len)
2963 {
2964 struct mgmt_cp_load_link_keys *cp = data;
2965 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2966 sizeof(struct mgmt_link_key_info));
2967 u16 key_count, expected_len;
2968 bool changed;
2969 int i;
2970
2971 bt_dev_dbg(hdev, "sock %p", sk);
2972
2973 if (!lmp_bredr_capable(hdev))
2974 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2975 MGMT_STATUS_NOT_SUPPORTED);
2976
2977 key_count = __le16_to_cpu(cp->key_count);
2978 if (key_count > max_key_count) {
2979 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2980 key_count);
2981 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2982 MGMT_STATUS_INVALID_PARAMS);
2983 }
2984
2985 expected_len = struct_size(cp, keys, key_count);
2986 if (expected_len != len) {
2987 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2988 expected_len, len);
2989 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2990 MGMT_STATUS_INVALID_PARAMS);
2991 }
2992
2993 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2994 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2995 MGMT_STATUS_INVALID_PARAMS);
2996
2997 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2998 key_count);
2999
3000 hci_dev_lock(hdev);
3001
3002 hci_link_keys_clear(hdev);
3003
3004 if (cp->debug_keys)
3005 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
3006 else
3007 changed = hci_dev_test_and_clear_flag(hdev,
3008 HCI_KEEP_DEBUG_KEYS);
3009
3010 if (changed)
3011 new_settings(hdev, NULL);
3012
3013 for (i = 0; i < key_count; i++) {
3014 struct mgmt_link_key_info *key = &cp->keys[i];
3015
3016 if (hci_is_blocked_key(hdev,
3017 HCI_BLOCKED_KEY_TYPE_LINKKEY,
3018 key->val)) {
3019 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
3020 &key->addr.bdaddr);
3021 continue;
3022 }
3023
3024 if (key->addr.type != BDADDR_BREDR) {
3025 bt_dev_warn(hdev,
3026 "Invalid link address type %u for %pMR",
3027 key->addr.type, &key->addr.bdaddr);
3028 continue;
3029 }
3030
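/* 0x08 (Authenticated Combination Key generated from P-256) is the
 * highest link key type defined by the Core Specification.
 */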
3031 if (key->type > 0x08) {
3032 bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
3033 key->type, &key->addr.bdaddr);
3034 continue;
3035 }
3036
3037 /* Always ignore debug keys and require a new pairing if
3038 * the user wants to use them.
3039 */
3040 if (key->type == HCI_LK_DEBUG_COMBINATION)
3041 continue;
3042
3043 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
3044 key->type, key->pin_len, NULL);
3045 }
3046
3047 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
3048
3049 hci_dev_unlock(hdev);
3050
3051 return 0;
3052 }
3053
3054 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
3055 u8 addr_type, struct sock *skip_sk)
3056 {
3057 struct mgmt_ev_device_unpaired ev;
3058
3059 bacpy(&ev.addr.bdaddr, bdaddr);
3060 ev.addr.type = addr_type;
3061
3062 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
3063 skip_sk);
3064 }
3065
3066 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
3067 {
3068 struct mgmt_pending_cmd *cmd = data;
3069 struct mgmt_cp_unpair_device *cp = cmd->param;
3070
3071 if (!err)
3072 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3073
3074 cmd->cmd_complete(cmd, err);
3075 mgmt_pending_free(cmd);
3076 }
3077
3078 static int unpair_device_sync(struct hci_dev *hdev, void *data)
3079 {
3080 struct mgmt_pending_cmd *cmd = data;
3081 struct mgmt_cp_unpair_device *cp = cmd->param;
3082 struct hci_conn *conn;
3083
3084 if (cp->addr.type == BDADDR_BREDR)
3085 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3086 &cp->addr.bdaddr);
3087 else
3088 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3089 le_addr_type(cp->addr.type));
3090
3091 if (!conn)
3092 return 0;
3093
3094 /* Disregard any possible error since the likes of hci_abort_conn_sync
3095 * will clean up the connection no matter the error.
3096 */
3097 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3098
3099 return 0;
3100 }
3101
3102 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3103 u16 len)
3104 {
3105 struct mgmt_cp_unpair_device *cp = data;
3106 struct mgmt_rp_unpair_device rp;
3107 struct hci_conn_params *params;
3108 struct mgmt_pending_cmd *cmd;
3109 struct hci_conn *conn;
3110 u8 addr_type;
3111 int err;
3112
3113 memset(&rp, 0, sizeof(rp));
3114 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3115 rp.addr.type = cp->addr.type;
3116
3117 if (!bdaddr_type_is_valid(cp->addr.type))
3118 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3119 MGMT_STATUS_INVALID_PARAMS,
3120 &rp, sizeof(rp));
3121
3122 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
3123 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3124 MGMT_STATUS_INVALID_PARAMS,
3125 &rp, sizeof(rp));
3126
3127 hci_dev_lock(hdev);
3128
3129 if (!hdev_is_powered(hdev)) {
3130 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3131 MGMT_STATUS_NOT_POWERED, &rp,
3132 sizeof(rp));
3133 goto unlock;
3134 }
3135
3136 if (cp->addr.type == BDADDR_BREDR) {
3137 /* If disconnection is requested, then look up the
3138 * connection. If the remote device is connected, it
3139 * will be later used to terminate the link.
3140 *
3141 * Setting it to NULL explicitly will cause no
3142 * termination of the link.
3143 */
3144 if (cp->disconnect)
3145 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3146 &cp->addr.bdaddr);
3147 else
3148 conn = NULL;
3149
3150 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3151 if (err < 0) {
3152 err = mgmt_cmd_complete(sk, hdev->id,
3153 MGMT_OP_UNPAIR_DEVICE,
3154 MGMT_STATUS_NOT_PAIRED, &rp,
3155 sizeof(rp));
3156 goto unlock;
3157 }
3158
3159 goto done;
3160 }
3161
3162 /* LE address type */
3163 addr_type = le_addr_type(cp->addr.type);
3164
3165 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3166 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3167 if (err < 0) {
3168 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3169 MGMT_STATUS_NOT_PAIRED, &rp,
3170 sizeof(rp));
3171 goto unlock;
3172 }
3173
3174 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3175 if (!conn) {
3176 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3177 goto done;
3178 }
3179
3180
3181 /* Defer clearing the connection parameters until the connection
3182 * closes, so they can be kept if the device pairs again.
3183 */
3184 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3185
3186 /* Disable auto-connection parameters if present */
3187 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3188 if (params) {
3189 if (params->explicit_connect)
3190 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3191 else
3192 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3193 }
3194
3195 /* If disconnection is not requested, then clear the connection
3196 * variable so that the link is not terminated.
3197 */
3198 if (!cp->disconnect)
3199 conn = NULL;
3200
3201 done:
3202 /* If the connection variable is set, then termination of the
3203 * link is requested.
3204 */
3205 if (!conn) {
3206 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3207 &rp, sizeof(rp));
3208 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3209 goto unlock;
3210 }
3211
3212 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3213 sizeof(*cp));
3214 if (!cmd) {
3215 err = -ENOMEM;
3216 goto unlock;
3217 }
3218
3219 cmd->cmd_complete = addr_cmd_complete;
3220
3221 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3222 unpair_device_complete);
3223 if (err < 0)
3224 mgmt_pending_free(cmd);
3225
3226 unlock:
3227 hci_dev_unlock(hdev);
3228 return err;
3229 }
3230
3231 static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
3232 {
3233 struct mgmt_pending_cmd *cmd = data;
3234
3235 cmd->cmd_complete(cmd, mgmt_status(err));
3236 mgmt_pending_free(cmd);
3237 }
3238
3239 static int disconnect_sync(struct hci_dev *hdev, void *data)
3240 {
3241 struct mgmt_pending_cmd *cmd = data;
3242 struct mgmt_cp_disconnect *cp = cmd->param;
3243 struct hci_conn *conn;
3244
3245 if (cp->addr.type == BDADDR_BREDR)
3246 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3247 &cp->addr.bdaddr);
3248 else
3249 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3250 le_addr_type(cp->addr.type));
3251
3252 if (!conn)
3253 return -ENOTCONN;
3254
3255 /* Disregard any possible error since the likes of hci_abort_conn_sync
3256 * will clean up the connection no matter the error.
3257 */
3258 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3259
3260 return 0;
3261 }
3262
3263 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3264 u16 len)
3265 {
3266 struct mgmt_cp_disconnect *cp = data;
3267 struct mgmt_rp_disconnect rp;
3268 struct mgmt_pending_cmd *cmd;
3269 int err;
3270
3271 bt_dev_dbg(hdev, "sock %p", sk);
3272
3273 memset(&rp, 0, sizeof(rp));
3274 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3275 rp.addr.type = cp->addr.type;
3276
3277 if (!bdaddr_type_is_valid(cp->addr.type))
3278 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3279 MGMT_STATUS_INVALID_PARAMS,
3280 &rp, sizeof(rp));
3281
3282 hci_dev_lock(hdev);
3283
3284 if (!test_bit(HCI_UP, &hdev->flags)) {
3285 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3286 MGMT_STATUS_NOT_POWERED, &rp,
3287 sizeof(rp));
3288 goto failed;
3289 }
3290
3291 cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3292 if (!cmd) {
3293 err = -ENOMEM;
3294 goto failed;
3295 }
3296
3297 cmd->cmd_complete = generic_cmd_complete;
3298
3299 err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
3300 disconnect_complete);
3301 if (err < 0)
3302 mgmt_pending_free(cmd);
3303
3304 failed:
3305 hci_dev_unlock(hdev);
3306 return err;
3307 }
3308
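/* Map an HCI link type and address type to the address type values used
 * by the management interface; all LE-based links (including ISO and PA
 * links) report an LE address type.
 */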
3309 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3310 {
3311 switch (link_type) {
3312 case CIS_LINK:
3313 case BIS_LINK:
3314 case PA_LINK:
3315 case LE_LINK:
3316 switch (addr_type) {
3317 case ADDR_LE_DEV_PUBLIC:
3318 return BDADDR_LE_PUBLIC;
3319
3320 default:
3321 /* Fallback to LE Random address type */
3322 return BDADDR_LE_RANDOM;
3323 }
3324
3325 default:
3326 /* Fallback to BR/EDR type */
3327 return BDADDR_BREDR;
3328 }
3329 }
3330
3331 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3332 u16 data_len)
3333 {
3334 struct mgmt_rp_get_connections *rp;
3335 struct hci_conn *c;
3336 int err;
3337 u16 i;
3338
3339 bt_dev_dbg(hdev, "sock %p", sk);
3340
3341 hci_dev_lock(hdev);
3342
3343 if (!hdev_is_powered(hdev)) {
3344 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3345 MGMT_STATUS_NOT_POWERED);
3346 goto unlock;
3347 }
3348
3349 i = 0;
3350 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3351 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3352 i++;
3353 }
3354
3355 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3356 if (!rp) {
3357 err = -ENOMEM;
3358 goto unlock;
3359 }
3360
3361 i = 0;
3362 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3363 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3364 continue;
3365 bacpy(&rp->addr[i].bdaddr, &c->dst);
3366 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
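/* SCO/eSCO links share the ACL link's address; skip them without
 * advancing the index so the slot gets reused.
 */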
3367 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3368 continue;
3369 i++;
3370 }
3371
3372 rp->conn_count = cpu_to_le16(i);
3373
3374 /* Recalculate length in case of filtered SCO connections, etc */
3375 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3376 struct_size(rp, addr, i));
3377
3378 kfree(rp);
3379
3380 unlock:
3381 hci_dev_unlock(hdev);
3382 return err;
3383 }
3384
3385 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3386 struct mgmt_cp_pin_code_neg_reply *cp)
3387 {
3388 struct mgmt_pending_cmd *cmd;
3389 int err;
3390
3391 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3392 sizeof(*cp));
3393 if (!cmd)
3394 return -ENOMEM;
3395
3396 cmd->cmd_complete = addr_cmd_complete;
3397
3398 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3399 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3400 if (err < 0)
3401 mgmt_pending_remove(cmd);
3402
3403 return err;
3404 }
3405
3406 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3407 u16 len)
3408 {
3409 struct hci_conn *conn;
3410 struct mgmt_cp_pin_code_reply *cp = data;
3411 struct hci_cp_pin_code_reply reply;
3412 struct mgmt_pending_cmd *cmd;
3413 int err;
3414
3415 bt_dev_dbg(hdev, "sock %p", sk);
3416
3417 hci_dev_lock(hdev);
3418
3419 if (!hdev_is_powered(hdev)) {
3420 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3421 MGMT_STATUS_NOT_POWERED);
3422 goto failed;
3423 }
3424
3425 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3426 if (!conn) {
3427 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3428 MGMT_STATUS_NOT_CONNECTED);
3429 goto failed;
3430 }
3431
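/* Pairing at high security requires a full 16 digit PIN code; anything
 * shorter is answered with a negative reply instead.
 */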
3432 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3433 struct mgmt_cp_pin_code_neg_reply ncp;
3434
3435 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3436
3437 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3438
3439 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3440 if (err >= 0)
3441 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3442 MGMT_STATUS_INVALID_PARAMS);
3443
3444 goto failed;
3445 }
3446
3447 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3448 if (!cmd) {
3449 err = -ENOMEM;
3450 goto failed;
3451 }
3452
3453 cmd->cmd_complete = addr_cmd_complete;
3454
3455 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3456 reply.pin_len = cp->pin_len;
3457 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3458
3459 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3460 if (err < 0)
3461 mgmt_pending_remove(cmd);
3462
3463 failed:
3464 hci_dev_unlock(hdev);
3465 return err;
3466 }
3467
3468 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3469 u16 len)
3470 {
3471 struct mgmt_cp_set_io_capability *cp = data;
3472
3473 bt_dev_dbg(hdev, "sock %p", sk);
3474
3475 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3476 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3477 MGMT_STATUS_INVALID_PARAMS);
3478
3479 hci_dev_lock(hdev);
3480
3481 hdev->io_capability = cp->io_capability;
3482
3483 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3484
3485 hci_dev_unlock(hdev);
3486
3487 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3488 NULL, 0);
3489 }
3490
3491 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3492 {
3493 struct hci_dev *hdev = conn->hdev;
3494 struct mgmt_pending_cmd *cmd;
3495
3496 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3497 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3498 continue;
3499
3500 if (cmd->user_data != conn)
3501 continue;
3502
3503 return cmd;
3504 }
3505
3506 return NULL;
3507 }
3508
3509 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3510 {
3511 struct mgmt_rp_pair_device rp;
3512 struct hci_conn *conn = cmd->user_data;
3513 int err;
3514
3515 bacpy(&rp.addr.bdaddr, &conn->dst);
3516 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3517
3518 err = mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_PAIR_DEVICE,
3519 status, &rp, sizeof(rp));
3520
3521 /* So we don't get further callbacks for this connection */
3522 conn->connect_cfm_cb = NULL;
3523 conn->security_cfm_cb = NULL;
3524 conn->disconn_cfm_cb = NULL;
3525
3526 hci_conn_drop(conn);
3527
3528 /* The device is paired so there is no need to remove
3529 * its connection parameters anymore.
3530 */
3531 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3532
3533 hci_conn_put(conn);
3534
3535 return err;
3536 }
3537
3538 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3539 {
3540 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3541 struct mgmt_pending_cmd *cmd;
3542
3543 cmd = find_pairing(conn);
3544 if (cmd) {
3545 cmd->cmd_complete(cmd, status);
3546 mgmt_pending_remove(cmd);
3547 }
3548 }
3549
3550 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3551 {
3552 struct mgmt_pending_cmd *cmd;
3553
3554 BT_DBG("status %u", status);
3555
3556 cmd = find_pairing(conn);
3557 if (!cmd) {
3558 BT_DBG("Unable to find a pending command");
3559 return;
3560 }
3561
3562 cmd->cmd_complete(cmd, mgmt_status(status));
3563 mgmt_pending_remove(cmd);
3564 }
3565
3566 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3567 {
3568 struct mgmt_pending_cmd *cmd;
3569
3570 BT_DBG("status %u", status);
3571
3572 if (!status)
3573 return;
3574
3575 cmd = find_pairing(conn);
3576 if (!cmd) {
3577 BT_DBG("Unable to find a pending command");
3578 return;
3579 }
3580
3581 cmd->cmd_complete(cmd, mgmt_status(status));
3582 mgmt_pending_remove(cmd);
3583 }
3584
3585 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3586 u16 len)
3587 {
3588 struct mgmt_cp_pair_device *cp = data;
3589 struct mgmt_rp_pair_device rp;
3590 struct mgmt_pending_cmd *cmd;
3591 u8 sec_level, auth_type;
3592 struct hci_conn *conn;
3593 int err;
3594
3595 bt_dev_dbg(hdev, "sock %p", sk);
3596
3597 memset(&rp, 0, sizeof(rp));
3598 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3599 rp.addr.type = cp->addr.type;
3600
3601 if (!bdaddr_type_is_valid(cp->addr.type))
3602 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3603 MGMT_STATUS_INVALID_PARAMS,
3604 &rp, sizeof(rp));
3605
3606 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3607 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3608 MGMT_STATUS_INVALID_PARAMS,
3609 &rp, sizeof(rp));
3610
3611 hci_dev_lock(hdev);
3612
3613 if (!hdev_is_powered(hdev)) {
3614 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3615 MGMT_STATUS_NOT_POWERED, &rp,
3616 sizeof(rp));
3617 goto unlock;
3618 }
3619
3620 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3621 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3622 MGMT_STATUS_ALREADY_PAIRED, &rp,
3623 sizeof(rp));
3624 goto unlock;
3625 }
3626
3627 sec_level = BT_SECURITY_MEDIUM;
3628 auth_type = HCI_AT_DEDICATED_BONDING;
3629
3630 if (cp->addr.type == BDADDR_BREDR) {
3631 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3632 auth_type, CONN_REASON_PAIR_DEVICE,
3633 HCI_ACL_CONN_TIMEOUT);
3634 } else {
3635 u8 addr_type = le_addr_type(cp->addr.type);
3636 struct hci_conn_params *p;
3637
3638 /* When pairing a new device, it is expected to remember
3639 * this device for future connections. Adding the connection
3640 * parameter information ahead of time allows tracking
3641 * of the peripheral preferred values and will speed up any
3642 * further connection establishment.
3643 *
3644 * If connection parameters already exist, then they
3645 * will be kept and this function does nothing.
3646 */
3647 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3648 if (!p) {
3649 err = -EIO;
3650 goto unlock;
3651 }
3652
3653 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3654 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3655
3656 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3657 sec_level, HCI_LE_CONN_TIMEOUT,
3658 CONN_REASON_PAIR_DEVICE);
3659 }
3660
3661 if (IS_ERR(conn)) {
3662 int status;
3663
3664 if (PTR_ERR(conn) == -EBUSY)
3665 status = MGMT_STATUS_BUSY;
3666 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3667 status = MGMT_STATUS_NOT_SUPPORTED;
3668 else if (PTR_ERR(conn) == -ECONNREFUSED)
3669 status = MGMT_STATUS_REJECTED;
3670 else
3671 status = MGMT_STATUS_CONNECT_FAILED;
3672
3673 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3674 status, &rp, sizeof(rp));
3675 goto unlock;
3676 }
3677
3678 if (conn->connect_cfm_cb) {
3679 hci_conn_drop(conn);
3680 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3681 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3682 goto unlock;
3683 }
3684
3685 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3686 if (!cmd) {
3687 err = -ENOMEM;
3688 hci_conn_drop(conn);
3689 goto unlock;
3690 }
3691
3692 cmd->cmd_complete = pairing_complete;
3693
3694 /* For LE, just connecting isn't a proof that the pairing finished */
3695 if (cp->addr.type == BDADDR_BREDR) {
3696 conn->connect_cfm_cb = pairing_complete_cb;
3697 conn->security_cfm_cb = pairing_complete_cb;
3698 conn->disconn_cfm_cb = pairing_complete_cb;
3699 } else {
3700 conn->connect_cfm_cb = le_pairing_complete_cb;
3701 conn->security_cfm_cb = le_pairing_complete_cb;
3702 conn->disconn_cfm_cb = le_pairing_complete_cb;
3703 }
3704
3705 conn->io_capability = cp->io_cap;
3706 cmd->user_data = hci_conn_get(conn);
3707
3708 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3709 hci_conn_security(conn, sec_level, auth_type, true)) {
3710 cmd->cmd_complete(cmd, 0);
3711 mgmt_pending_remove(cmd);
3712 }
3713
3714 err = 0;
3715
3716 unlock:
3717 hci_dev_unlock(hdev);
3718 return err;
3719 }
3720
3721 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3722 u16 len)
3723 {
3724 struct mgmt_addr_info *addr = data;
3725 struct mgmt_pending_cmd *cmd;
3726 struct hci_conn *conn;
3727 int err;
3728
3729 bt_dev_dbg(hdev, "sock %p", sk);
3730
3731 hci_dev_lock(hdev);
3732
3733 if (!hdev_is_powered(hdev)) {
3734 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3735 MGMT_STATUS_NOT_POWERED);
3736 goto unlock;
3737 }
3738
3739 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3740 if (!cmd) {
3741 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3742 MGMT_STATUS_INVALID_PARAMS);
3743 goto unlock;
3744 }
3745
3746 conn = cmd->user_data;
3747
3748 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3749 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3750 MGMT_STATUS_INVALID_PARAMS);
3751 goto unlock;
3752 }
3753
3754 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3755 mgmt_pending_remove(cmd);
3756
3757 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3758 addr, sizeof(*addr));
3759
3760 /* Since user doesn't want to proceed with the connection, abort any
3761 * ongoing pairing and then terminate the link if it was created
3762 * because of the pair device action.
3763 */
3764 if (addr->type == BDADDR_BREDR)
3765 hci_remove_link_key(hdev, &addr->bdaddr);
3766 else
3767 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3768 le_addr_type(addr->type));
3769
3770 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3771 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3772
3773 unlock:
3774 hci_dev_unlock(hdev);
3775 return err;
3776 }
3777
3778 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3779 struct mgmt_addr_info *addr, u16 mgmt_op,
3780 u16 hci_op, __le32 passkey)
3781 {
3782 struct mgmt_pending_cmd *cmd;
3783 struct hci_conn *conn;
3784 int err;
3785
3786 hci_dev_lock(hdev);
3787
3788 if (!hdev_is_powered(hdev)) {
3789 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3790 MGMT_STATUS_NOT_POWERED, addr,
3791 sizeof(*addr));
3792 goto done;
3793 }
3794
3795 if (addr->type == BDADDR_BREDR)
3796 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3797 else
3798 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3799 le_addr_type(addr->type));
3800
3801 if (!conn) {
3802 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3803 MGMT_STATUS_NOT_CONNECTED, addr,
3804 sizeof(*addr));
3805 goto done;
3806 }
3807
3808 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3809 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3810 if (!err)
3811 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3812 MGMT_STATUS_SUCCESS, addr,
3813 sizeof(*addr));
3814 else
3815 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3816 MGMT_STATUS_FAILED, addr,
3817 sizeof(*addr));
3818
3819 goto done;
3820 }
3821
3822 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3823 if (!cmd) {
3824 err = -ENOMEM;
3825 goto done;
3826 }
3827
3828 cmd->cmd_complete = addr_cmd_complete;
3829
3830 /* Continue with pairing via HCI */
3831 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3832 struct hci_cp_user_passkey_reply cp;
3833
3834 bacpy(&cp.bdaddr, &addr->bdaddr);
3835 cp.passkey = passkey;
3836 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3837 } else
3838 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3839 &addr->bdaddr);
3840
3841 if (err < 0)
3842 mgmt_pending_remove(cmd);
3843
3844 done:
3845 hci_dev_unlock(hdev);
3846 return err;
3847 }
3848
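/* The thin handlers below all funnel into user_pairing_resp(), pairing
 * each management opcode with its HCI counterpart:
 *
 *   MGMT_OP_PIN_CODE_NEG_REPLY     -> HCI_OP_PIN_CODE_NEG_REPLY
 *   MGMT_OP_USER_CONFIRM_REPLY     -> HCI_OP_USER_CONFIRM_REPLY
 *   MGMT_OP_USER_CONFIRM_NEG_REPLY -> HCI_OP_USER_CONFIRM_NEG_REPLY
 *   MGMT_OP_USER_PASSKEY_REPLY     -> HCI_OP_USER_PASSKEY_REPLY
 *   MGMT_OP_USER_PASSKEY_NEG_REPLY -> HCI_OP_USER_PASSKEY_NEG_REPLY
 *
 * Only the passkey reply carries a payload beyond the address.
 */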
3849 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3850 void *data, u16 len)
3851 {
3852 struct mgmt_cp_pin_code_neg_reply *cp = data;
3853
3854 bt_dev_dbg(hdev, "sock %p", sk);
3855
3856 return user_pairing_resp(sk, hdev, &cp->addr,
3857 MGMT_OP_PIN_CODE_NEG_REPLY,
3858 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3859 }
3860
3861 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3862 u16 len)
3863 {
3864 struct mgmt_cp_user_confirm_reply *cp = data;
3865
3866 bt_dev_dbg(hdev, "sock %p", sk);
3867
3868 if (len != sizeof(*cp))
3869 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3870 MGMT_STATUS_INVALID_PARAMS);
3871
3872 return user_pairing_resp(sk, hdev, &cp->addr,
3873 MGMT_OP_USER_CONFIRM_REPLY,
3874 HCI_OP_USER_CONFIRM_REPLY, 0);
3875 }
3876
3877 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3878 void *data, u16 len)
3879 {
3880 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3881
3882 bt_dev_dbg(hdev, "sock %p", sk);
3883
3884 return user_pairing_resp(sk, hdev, &cp->addr,
3885 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3886 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3887 }
3888
3889 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3890 u16 len)
3891 {
3892 struct mgmt_cp_user_passkey_reply *cp = data;
3893
3894 bt_dev_dbg(hdev, "sock %p", sk);
3895
3896 return user_pairing_resp(sk, hdev, &cp->addr,
3897 MGMT_OP_USER_PASSKEY_REPLY,
3898 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3899 }
3900
3901 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3902 void *data, u16 len)
3903 {
3904 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3905
3906 bt_dev_dbg(hdev, "sock %p", sk);
3907
3908 return user_pairing_resp(sk, hdev, &cp->addr,
3909 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3910 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3911 }
3912
3913 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3914 {
3915 struct adv_info *adv_instance;
3916
3917 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3918 if (!adv_instance)
3919 return 0;
3920
3921 /* Stop if the current instance doesn't need to be changed */
3922 if (!(adv_instance->flags & flags))
3923 return 0;
3924
3925 cancel_adv_timeout(hdev);
3926
3927 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3928 if (!adv_instance)
3929 return 0;
3930
3931 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3932
3933 return 0;
3934 }
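/* adv_expire_sync() is shared by the local-name and appearance update
 * paths: if the current advertising instance carries the flag that just
 * changed, its timeout is cancelled and the next instance is scheduled
 * so stale data is not left on air.
 */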
3935
3936 static int name_changed_sync(struct hci_dev *hdev, void *data)
3937 {
3938 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3939 }
3940
3941 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3942 {
3943 struct mgmt_pending_cmd *cmd = data;
3944 struct mgmt_cp_set_local_name *cp;
3945 u8 status = mgmt_status(err);
3946
3947 bt_dev_dbg(hdev, "err %d", err);
3948
3949 if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
3950 return;
3951
3952 cp = cmd->param;
3953
3954 if (status) {
3955 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3956 status);
3957 } else {
3958 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3959 cp, sizeof(*cp));
3960
3961 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3962 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3963 }
3964
3965 mgmt_pending_free(cmd);
3966 }
3967
3968 static int set_name_sync(struct hci_dev *hdev, void *data)
3969 {
3970 struct mgmt_pending_cmd *cmd = data;
3971 struct mgmt_cp_set_local_name cp;
3972
3973 mutex_lock(&hdev->mgmt_pending_lock);
3974
3975 if (!__mgmt_pending_listed(hdev, cmd)) {
3976 mutex_unlock(&hdev->mgmt_pending_lock);
3977 return -ECANCELED;
3978 }
3979
3980 memcpy(&cp, cmd->param, sizeof(cp));
3981
3982 mutex_unlock(&hdev->mgmt_pending_lock);
3983
3984 if (lmp_bredr_capable(hdev)) {
3985 hci_update_name_sync(hdev, cp.name);
3986 hci_update_eir_sync(hdev);
3987 }
3988
3989 /* The name is stored in the scan response data, so there is
3990 * no need to update the advertising data here.
3991 */
3992 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3993 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3994
3995 return 0;
3996 }
3997
3998 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3999 u16 len)
4000 {
4001 struct mgmt_cp_set_local_name *cp = data;
4002 struct mgmt_pending_cmd *cmd;
4003 int err;
4004
4005 bt_dev_dbg(hdev, "sock %p", sk);
4006
4007 hci_dev_lock(hdev);
4008
4009 /* If the old values are the same as the new ones just return a
4010 * direct command complete event.
4011 */
4012 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
4013 !memcmp(hdev->short_name, cp->short_name,
4014 sizeof(hdev->short_name))) {
4015 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
4016 data, len);
4017 goto failed;
4018 }
4019
4020 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
4021
4022 if (!hdev_is_powered(hdev)) {
4023 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
4024
4025 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
4026 data, len);
4027 if (err < 0)
4028 goto failed;
4029
4030 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
4031 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
4032 ext_info_changed(hdev, sk);
4033
4034 goto failed;
4035 }
4036
4037 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
4038 if (!cmd)
4039 err = -ENOMEM;
4040 else
4041 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
4042 set_name_complete);
4043
4044 if (err < 0) {
4045 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
4046 MGMT_STATUS_FAILED);
4047
4048 if (cmd)
4049 mgmt_pending_remove(cmd);
4050
4051 goto failed;
4052 }
4053
4054 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
4055
4056 failed:
4057 hci_dev_unlock(hdev);
4058 return err;
4059 }
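/* A minimal userspace sketch (illustrative only, not part of this file)
 * of driving the handler above over the mgmt control channel. It assumes
 * mgmt_fd is an AF_BLUETOOTH socket bound to HCI_CHANNEL_CONTROL and
 * htobs() is the usual host-to-bluetooth (little-endian) helper:
 *
 *	struct mgmt_hdr hdr = {
 *		.opcode = htobs(MGMT_OP_SET_LOCAL_NAME),
 *		.index  = htobs(0),
 *		.len    = htobs(sizeof(struct mgmt_cp_set_local_name)),
 *	};
 *	struct mgmt_cp_set_local_name cp = { .name = "example" };
 *	struct iovec iov[] = { { &hdr, sizeof(hdr) }, { &cp, sizeof(cp) } };
 *
 *	if (writev(mgmt_fd, iov, 2) < 0)
 *		perror("mgmt write");
 */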
4060
4061 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
4062 {
4063 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
4064 }
4065
4066 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
4067 u16 len)
4068 {
4069 struct mgmt_cp_set_appearance *cp = data;
4070 u16 appearance;
4071 int err;
4072
4073 bt_dev_dbg(hdev, "sock %p", sk);
4074
4075 if (!lmp_le_capable(hdev))
4076 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
4077 MGMT_STATUS_NOT_SUPPORTED);
4078
4079 appearance = le16_to_cpu(cp->appearance);
4080
4081 hci_dev_lock(hdev);
4082
4083 if (hdev->appearance != appearance) {
4084 hdev->appearance = appearance;
4085
4086 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
4087 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
4088 NULL);
4089
4090 ext_info_changed(hdev, sk);
4091 }
4092
4093 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
4094 0);
4095
4096 hci_dev_unlock(hdev);
4097
4098 return err;
4099 }
4100
4101 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4102 void *data, u16 len)
4103 {
4104 struct mgmt_rp_get_phy_configuration rp;
4105
4106 bt_dev_dbg(hdev, "sock %p", sk);
4107
4108 hci_dev_lock(hdev);
4109
4110 memset(&rp, 0, sizeof(rp));
4111
4112 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
4113 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
4114 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
4115
4116 hci_dev_unlock(hdev);
4117
4118 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
4119 &rp, sizeof(rp));
4120 }
4121
4122 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
4123 {
4124 struct mgmt_ev_phy_configuration_changed ev;
4125
4126 memset(&ev, 0, sizeof(ev));
4127
4128 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
4129
4130 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
4131 sizeof(ev), skip);
4132 }
4133
4134 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
4135 {
4136 struct mgmt_pending_cmd *cmd = data;
4137 struct sk_buff *skb;
4138 u8 status = mgmt_status(err);
4139
4140 skb = cmd->skb;
4141
4142 if (!status) {
4143 if (!skb)
4144 status = MGMT_STATUS_FAILED;
4145 else if (IS_ERR(skb))
4146 status = mgmt_status(PTR_ERR(skb));
4147 else
4148 status = mgmt_status(skb->data[0]);
4149 }
4150
4151 bt_dev_dbg(hdev, "status %d", status);
4152
4153 if (status) {
4154 mgmt_cmd_status(cmd->sk, hdev->id,
4155 MGMT_OP_SET_PHY_CONFIGURATION, status);
4156 } else {
4157 mgmt_cmd_complete(cmd->sk, hdev->id,
4158 MGMT_OP_SET_PHY_CONFIGURATION, 0,
4159 NULL, 0);
4160
4161 mgmt_phy_configuration_changed(hdev, cmd->sk);
4162 }
4163
4164 if (skb && !IS_ERR(skb))
4165 kfree_skb(skb);
4166
4167 mgmt_pending_free(cmd);
4168 }
4169
4170 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4171 {
4172 struct mgmt_pending_cmd *cmd = data;
4173 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4174 struct hci_cp_le_set_default_phy cp_phy;
4175 u32 selected_phys;
4176
4177 selected_phys = __le32_to_cpu(cp->selected_phys);
4178
4179 memset(&cp_phy, 0, sizeof(cp_phy));
4180
4181 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4182 cp_phy.all_phys |= 0x01;
4183
4184 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4185 cp_phy.all_phys |= 0x02;
4186
4187 if (selected_phys & MGMT_PHY_LE_1M_TX)
4188 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4189
4190 if (selected_phys & MGMT_PHY_LE_2M_TX)
4191 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4192
4193 if (selected_phys & MGMT_PHY_LE_CODED_TX)
4194 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4195
4196 if (selected_phys & MGMT_PHY_LE_1M_RX)
4197 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4198
4199 if (selected_phys & MGMT_PHY_LE_2M_RX)
4200 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4201
4202 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4203 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4204
4205 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4206 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4207
4208 return 0;
4209 }
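/* The all_phys bits above follow the HCI LE Set Default PHY definition:
 * bit 0 set means the host has no transmitter PHY preference and bit 1
 * set means no receiver preference, so each is raised whenever the
 * selected mask leaves that direction empty.
 */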
4210
4211 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4212 void *data, u16 len)
4213 {
4214 struct mgmt_cp_set_phy_configuration *cp = data;
4215 struct mgmt_pending_cmd *cmd;
4216 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4217 u16 pkt_type = (HCI_DH1 | HCI_DM1);
4218 bool changed = false;
4219 int err;
4220
4221 bt_dev_dbg(hdev, "sock %p", sk);
4222
4223 configurable_phys = get_configurable_phys(hdev);
4224 supported_phys = get_supported_phys(hdev);
4225 selected_phys = __le32_to_cpu(cp->selected_phys);
4226
4227 if (selected_phys & ~supported_phys)
4228 return mgmt_cmd_status(sk, hdev->id,
4229 MGMT_OP_SET_PHY_CONFIGURATION,
4230 MGMT_STATUS_INVALID_PARAMS);
4231
4232 unconfigure_phys = supported_phys & ~configurable_phys;
4233
4234 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4235 return mgmt_cmd_status(sk, hdev->id,
4236 MGMT_OP_SET_PHY_CONFIGURATION,
4237 MGMT_STATUS_INVALID_PARAMS);
4238
4239 if (selected_phys == get_selected_phys(hdev))
4240 return mgmt_cmd_complete(sk, hdev->id,
4241 MGMT_OP_SET_PHY_CONFIGURATION,
4242 0, NULL, 0);
4243
4244 hci_dev_lock(hdev);
4245
4246 if (!hdev_is_powered(hdev)) {
4247 err = mgmt_cmd_status(sk, hdev->id,
4248 MGMT_OP_SET_PHY_CONFIGURATION,
4249 MGMT_STATUS_REJECTED);
4250 goto unlock;
4251 }
4252
4253 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4254 err = mgmt_cmd_status(sk, hdev->id,
4255 MGMT_OP_SET_PHY_CONFIGURATION,
4256 MGMT_STATUS_BUSY);
4257 goto unlock;
4258 }
4259
4260 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4261 pkt_type |= (HCI_DH3 | HCI_DM3);
4262 else
4263 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4264
4265 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4266 pkt_type |= (HCI_DH5 | HCI_DM5);
4267 else
4268 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4269
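/* Note the inverted sense below: unlike the basic-rate DM/DH bits, the
 * EDR packet-type bits (HCI_2DH1 through HCI_3DH5) mean "shall not be
 * used", so a selected EDR PHY clears its bit and a deselected one sets
 * it.
 */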
4270 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4271 pkt_type &= ~HCI_2DH1;
4272 else
4273 pkt_type |= HCI_2DH1;
4274
4275 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4276 pkt_type &= ~HCI_2DH3;
4277 else
4278 pkt_type |= HCI_2DH3;
4279
4280 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4281 pkt_type &= ~HCI_2DH5;
4282 else
4283 pkt_type |= HCI_2DH5;
4284
4285 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4286 pkt_type &= ~HCI_3DH1;
4287 else
4288 pkt_type |= HCI_3DH1;
4289
4290 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4291 pkt_type &= ~HCI_3DH3;
4292 else
4293 pkt_type |= HCI_3DH3;
4294
4295 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4296 pkt_type &= ~HCI_3DH5;
4297 else
4298 pkt_type |= HCI_3DH5;
4299
4300 if (pkt_type != hdev->pkt_type) {
4301 hdev->pkt_type = pkt_type;
4302 changed = true;
4303 }
4304
4305 if ((selected_phys & MGMT_PHY_LE_MASK) ==
4306 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4307 if (changed)
4308 mgmt_phy_configuration_changed(hdev, sk);
4309
4310 err = mgmt_cmd_complete(sk, hdev->id,
4311 MGMT_OP_SET_PHY_CONFIGURATION,
4312 0, NULL, 0);
4313
4314 goto unlock;
4315 }
4316
4317 cmd = mgmt_pending_new(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4318 len);
4319 if (!cmd)
4320 err = -ENOMEM;
4321 else
4322 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4323 set_default_phy_complete);
4324
4325 if (err < 0) {
4326 err = mgmt_cmd_status(sk, hdev->id,
4327 MGMT_OP_SET_PHY_CONFIGURATION,
4328 MGMT_STATUS_FAILED);
4329
4330 if (cmd)
4331 mgmt_pending_remove(cmd);
4332 }
4333
4334 unlock:
4335 hci_dev_unlock(hdev);
4336
4337 return err;
4338 }
4339
4340 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4341 u16 len)
4342 {
4343 int err = MGMT_STATUS_SUCCESS;
4344 struct mgmt_cp_set_blocked_keys *keys = data;
4345 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4346 sizeof(struct mgmt_blocked_key_info));
4347 u16 key_count, expected_len;
4348 int i;
4349
4350 bt_dev_dbg(hdev, "sock %p", sk);
4351
4352 key_count = __le16_to_cpu(keys->key_count);
4353 if (key_count > max_key_count) {
4354 bt_dev_err(hdev, "too big key_count value %u", key_count);
4355 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4356 MGMT_STATUS_INVALID_PARAMS);
4357 }
4358
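/* The request carries a variable-length key array, so its total length
 * must match struct_size(keys, keys, key_count) exactly.
 */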
4359 expected_len = struct_size(keys, keys, key_count);
4360 if (expected_len != len) {
4361 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4362 expected_len, len);
4363 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4364 MGMT_STATUS_INVALID_PARAMS);
4365 }
4366
4367 hci_dev_lock(hdev);
4368
4369 hci_blocked_keys_clear(hdev);
4370
4371 for (i = 0; i < key_count; ++i) {
4372 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4373
4374 if (!b) {
4375 err = MGMT_STATUS_NO_RESOURCES;
4376 break;
4377 }
4378
4379 b->type = keys->keys[i].type;
4380 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4381 list_add_rcu(&b->list, &hdev->blocked_keys);
4382 }
4383 hci_dev_unlock(hdev);
4384
4385 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4386 err, NULL, 0);
4387 }
4388
4389 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4390 void *data, u16 len)
4391 {
4392 struct mgmt_mode *cp = data;
4393 int err;
4394 bool changed = false;
4395
4396 bt_dev_dbg(hdev, "sock %p", sk);
4397
4398 if (!hci_test_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED))
4399 return mgmt_cmd_status(sk, hdev->id,
4400 MGMT_OP_SET_WIDEBAND_SPEECH,
4401 MGMT_STATUS_NOT_SUPPORTED);
4402
4403 if (cp->val != 0x00 && cp->val != 0x01)
4404 return mgmt_cmd_status(sk, hdev->id,
4405 MGMT_OP_SET_WIDEBAND_SPEECH,
4406 MGMT_STATUS_INVALID_PARAMS);
4407
4408 hci_dev_lock(hdev);
4409
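/* The wideband speech setting can only be flipped while the controller
 * is powered off; a runtime change of the flag is rejected below.
 */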
4410 if (hdev_is_powered(hdev) &&
4411 !!cp->val != hci_dev_test_flag(hdev,
4412 HCI_WIDEBAND_SPEECH_ENABLED)) {
4413 err = mgmt_cmd_status(sk, hdev->id,
4414 MGMT_OP_SET_WIDEBAND_SPEECH,
4415 MGMT_STATUS_REJECTED);
4416 goto unlock;
4417 }
4418
4419 if (cp->val)
4420 changed = !hci_dev_test_and_set_flag(hdev,
4421 HCI_WIDEBAND_SPEECH_ENABLED);
4422 else
4423 changed = hci_dev_test_and_clear_flag(hdev,
4424 HCI_WIDEBAND_SPEECH_ENABLED);
4425
4426 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4427 if (err < 0)
4428 goto unlock;
4429
4430 if (changed)
4431 err = new_settings(hdev, sk);
4432
4433 unlock:
4434 hci_dev_unlock(hdev);
4435 return err;
4436 }
4437
4438 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4439 void *data, u16 data_len)
4440 {
4441 char buf[20];
4442 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4443 u16 cap_len = 0;
4444 u8 flags = 0;
4445 u8 tx_power_range[2];
4446
4447 bt_dev_dbg(hdev, "sock %p", sk);
4448
4449 memset(&buf, 0, sizeof(buf));
4450
4451 hci_dev_lock(hdev);
4452
4453 /* When the Read Simple Pairing Options command is supported, then
4454 * the remote public key validation is supported.
4455 *
4456 * Alternatively, when Microsoft extensions are available, they can
4457 * indicate support for public key validation as well.
4458 */
4459 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4460 flags |= 0x01; /* Remote public key validation (BR/EDR) */
4461
4462 flags |= 0x02; /* Remote public key validation (LE) */
4463
4464 /* When the Read Encryption Key Size command is supported, then the
4465 * encryption key size is enforced.
4466 */
4467 if (hdev->commands[20] & 0x10)
4468 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
4469
4470 flags |= 0x08; /* Encryption key size enforcement (LE) */
4471
4472 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4473 &flags, 1);
4474
4475 /* When the Read Simple Pairing Options command is supported, the
4476 * maximum encryption key size is also provided.
4477 */
4478 if (hdev->commands[41] & 0x08)
4479 cap_len = eir_append_le16(rp->cap, cap_len,
4480 MGMT_CAP_MAX_ENC_KEY_SIZE,
4481 hdev->max_enc_key_size);
4482
4483 cap_len = eir_append_le16(rp->cap, cap_len,
4484 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4485 SMP_MAX_ENC_KEY_SIZE);
4486
4487 /* Append the min/max LE tx power parameters if we were able to fetch
4488 * them from the controller.
4489 */
4490 if (hdev->commands[38] & 0x80) {
4491 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4492 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4493 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4494 tx_power_range, 2);
4495 }
4496
4497 rp->cap_len = cpu_to_le16(cap_len);
4498
4499 hci_dev_unlock(hdev);
4500
4501 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4502 rp, sizeof(*rp) + cap_len);
4503 }
4504
4505 #ifdef CONFIG_BT_FEATURE_DEBUG
4506 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4507 static const u8 debug_uuid[16] = {
4508 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4509 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4510 };
4511 #endif
4512
4513 /* 330859bc-7506-492d-9370-9a6f0614037f */
4514 static const u8 quality_report_uuid[16] = {
4515 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4516 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4517 };
4518
4519 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4520 static const u8 offload_codecs_uuid[16] = {
4521 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4522 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4523 };
4524
4525 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4526 static const u8 le_simultaneous_roles_uuid[16] = {
4527 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4528 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4529 };
4530
4531 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4532 static const u8 iso_socket_uuid[16] = {
4533 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4534 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4535 };
4536
4537 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4538 static const u8 mgmt_mesh_uuid[16] = {
4539 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4540 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4541 };
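/* As elsewhere in mgmt, the UUID byte arrays above are stored in
 * little-endian order, i.e. byte-reversed relative to the string form
 * quoted in each preceding comment.
 */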
4542
4543 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4544 void *data, u16 data_len)
4545 {
4546 struct mgmt_rp_read_exp_features_info *rp;
4547 size_t len;
4548 u16 idx = 0;
4549 u32 flags;
4550 int status;
4551
4552 bt_dev_dbg(hdev, "sock %p", sk);
4553
4554 /* Enough space for 7 features */
4555 len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4556 rp = kzalloc(len, GFP_KERNEL);
4557 if (!rp)
4558 return -ENOMEM;
4559
4560 #ifdef CONFIG_BT_FEATURE_DEBUG
4561 flags = bt_dbg_get() ? BIT(0) : 0;
4562
4563 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4564 rp->features[idx].flags = cpu_to_le32(flags);
4565 idx++;
4566 #endif
4567
4568 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4569 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4570 flags = BIT(0);
4571 else
4572 flags = 0;
4573
4574 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4575 rp->features[idx].flags = cpu_to_le32(flags);
4576 idx++;
4577 }
4578
4579 if (hdev && (aosp_has_quality_report(hdev) ||
4580 hdev->set_quality_report)) {
4581 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4582 flags = BIT(0);
4583 else
4584 flags = 0;
4585
4586 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4587 rp->features[idx].flags = cpu_to_le32(flags);
4588 idx++;
4589 }
4590
4591 if (hdev && hdev->get_data_path_id) {
4592 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4593 flags = BIT(0);
4594 else
4595 flags = 0;
4596
4597 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4598 rp->features[idx].flags = cpu_to_le32(flags);
4599 idx++;
4600 }
4601
4602 if (IS_ENABLED(CONFIG_BT_LE)) {
4603 flags = iso_inited() ? BIT(0) : 0;
4604 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4605 rp->features[idx].flags = cpu_to_le32(flags);
4606 idx++;
4607 }
4608
4609 if (hdev && lmp_le_capable(hdev)) {
4610 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4611 flags = BIT(0);
4612 else
4613 flags = 0;
4614
4615 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4616 rp->features[idx].flags = cpu_to_le32(flags);
4617 idx++;
4618 }
4619
4620 rp->feature_count = cpu_to_le16(idx);
4621
4622 /* After reading the experimental features information, enable
4623 * the events to update the client on any future change.
4624 */
4625 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4626
4627 status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4628 MGMT_OP_READ_EXP_FEATURES_INFO,
4629 0, rp, sizeof(*rp) + (20 * idx));
4630
4631 kfree(rp);
4632 return status;
4633 }
4634
4635 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4636 bool enabled, struct sock *skip)
4637 {
4638 struct mgmt_ev_exp_feature_changed ev;
4639
4640 memset(&ev, 0, sizeof(ev));
4641 memcpy(ev.uuid, uuid, 16);
4642 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4643
4644 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4645 &ev, sizeof(ev),
4646 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4647 }
4648
4649 #define EXP_FEAT(_uuid, _set_func) \
4650 { \
4651 .uuid = _uuid, \
4652 .set_func = _set_func, \
4653 }
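/* Adding a new experimental feature is table-driven: define its UUID
 * byte array as above, implement a setter with the set_func signature
 * below, and append an EXP_FEAT() entry before the NULL sentinel in
 * exp_features[].
 */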
4654
4655 /* The zero key uuid is special. Multiple exp features are set through it. */
4656 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4657 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4658 {
4659 struct mgmt_rp_set_exp_feature rp;
4660
4661 memset(rp.uuid, 0, 16);
4662 rp.flags = cpu_to_le32(0);
4663
4664 #ifdef CONFIG_BT_FEATURE_DEBUG
4665 if (!hdev) {
4666 bool changed = bt_dbg_get();
4667
4668 bt_dbg_set(false);
4669
4670 if (changed)
4671 exp_feature_changed(NULL, ZERO_KEY, false, sk);
4672 }
4673 #endif
4674
4675 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4676
4677 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4678 MGMT_OP_SET_EXP_FEATURE, 0,
4679 &rp, sizeof(rp));
4680 }
4681
4682 #ifdef CONFIG_BT_FEATURE_DEBUG
4683 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4684 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4685 {
4686 struct mgmt_rp_set_exp_feature rp;
4687
4688 bool val, changed;
4689 int err;
4690
4691 /* This command requires the non-controller index */
4692 if (hdev)
4693 return mgmt_cmd_status(sk, hdev->id,
4694 MGMT_OP_SET_EXP_FEATURE,
4695 MGMT_STATUS_INVALID_INDEX);
4696
4697 /* Parameters are limited to a single octet */
4698 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4699 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4700 MGMT_OP_SET_EXP_FEATURE,
4701 MGMT_STATUS_INVALID_PARAMS);
4702
4703 /* Only boolean on/off is supported */
4704 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4705 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4706 MGMT_OP_SET_EXP_FEATURE,
4707 MGMT_STATUS_INVALID_PARAMS);
4708
4709 val = !!cp->param[0];
4710 changed = val ? !bt_dbg_get() : bt_dbg_get();
4711 bt_dbg_set(val);
4712
4713 memcpy(rp.uuid, debug_uuid, 16);
4714 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4715
4716 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4717
4718 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4719 MGMT_OP_SET_EXP_FEATURE, 0,
4720 &rp, sizeof(rp));
4721
4722 if (changed)
4723 exp_feature_changed(hdev, debug_uuid, val, sk);
4724
4725 return err;
4726 }
4727 #endif
4728
4729 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4730 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4731 {
4732 struct mgmt_rp_set_exp_feature rp;
4733 bool val, changed;
4734 int err;
4735
4736 /* This command requires the controller index */
4737 if (!hdev)
4738 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4739 MGMT_OP_SET_EXP_FEATURE,
4740 MGMT_STATUS_INVALID_INDEX);
4741
4742 /* Parameters are limited to a single octet */
4743 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4744 return mgmt_cmd_status(sk, hdev->id,
4745 MGMT_OP_SET_EXP_FEATURE,
4746 MGMT_STATUS_INVALID_PARAMS);
4747
4748 /* Only boolean on/off is supported */
4749 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4750 return mgmt_cmd_status(sk, hdev->id,
4751 MGMT_OP_SET_EXP_FEATURE,
4752 MGMT_STATUS_INVALID_PARAMS);
4753
4754 val = !!cp->param[0];
4755
4756 if (val) {
4757 changed = !hci_dev_test_and_set_flag(hdev,
4758 HCI_MESH_EXPERIMENTAL);
4759 } else {
4760 hci_dev_clear_flag(hdev, HCI_MESH);
4761 changed = hci_dev_test_and_clear_flag(hdev,
4762 HCI_MESH_EXPERIMENTAL);
4763 }
4764
4765 memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4766 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4767
4768 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4769
4770 err = mgmt_cmd_complete(sk, hdev->id,
4771 MGMT_OP_SET_EXP_FEATURE, 0,
4772 &rp, sizeof(rp));
4773
4774 if (changed)
4775 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4776
4777 return err;
4778 }
4779
4780 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4781 struct mgmt_cp_set_exp_feature *cp,
4782 u16 data_len)
4783 {
4784 struct mgmt_rp_set_exp_feature rp;
4785 bool val, changed;
4786 int err;
4787
4788 /* This command requires a valid controller index */
4789 if (!hdev)
4790 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4791 MGMT_OP_SET_EXP_FEATURE,
4792 MGMT_STATUS_INVALID_INDEX);
4793
4794 /* Parameters are limited to a single octet */
4795 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4796 return mgmt_cmd_status(sk, hdev->id,
4797 MGMT_OP_SET_EXP_FEATURE,
4798 MGMT_STATUS_INVALID_PARAMS);
4799
4800 /* Only boolean on/off is supported */
4801 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4802 return mgmt_cmd_status(sk, hdev->id,
4803 MGMT_OP_SET_EXP_FEATURE,
4804 MGMT_STATUS_INVALID_PARAMS);
4805
4806 hci_req_sync_lock(hdev);
4807
4808 val = !!cp->param[0];
4809 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4810
4811 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4812 err = mgmt_cmd_status(sk, hdev->id,
4813 MGMT_OP_SET_EXP_FEATURE,
4814 MGMT_STATUS_NOT_SUPPORTED);
4815 goto unlock_quality_report;
4816 }
4817
4818 if (changed) {
4819 if (hdev->set_quality_report)
4820 err = hdev->set_quality_report(hdev, val);
4821 else
4822 err = aosp_set_quality_report(hdev, val);
4823
4824 if (err) {
4825 err = mgmt_cmd_status(sk, hdev->id,
4826 MGMT_OP_SET_EXP_FEATURE,
4827 MGMT_STATUS_FAILED);
4828 goto unlock_quality_report;
4829 }
4830
4831 if (val)
4832 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4833 else
4834 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4835 }
4836
4837 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4838
4839 memcpy(rp.uuid, quality_report_uuid, 16);
4840 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4841 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4842
4843 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4844 &rp, sizeof(rp));
4845
4846 if (changed)
4847 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4848
4849 unlock_quality_report:
4850 hci_req_sync_unlock(hdev);
4851 return err;
4852 }
4853
4854 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4855 struct mgmt_cp_set_exp_feature *cp,
4856 u16 data_len)
4857 {
4858 bool val, changed;
4859 int err;
4860 struct mgmt_rp_set_exp_feature rp;
4861
4862 /* This command requires a valid controller index */
4863 if (!hdev)
4864 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4865 MGMT_OP_SET_EXP_FEATURE,
4866 MGMT_STATUS_INVALID_INDEX);
4867
4868 /* Parameters are limited to a single octet */
4869 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4870 return mgmt_cmd_status(sk, hdev->id,
4871 MGMT_OP_SET_EXP_FEATURE,
4872 MGMT_STATUS_INVALID_PARAMS);
4873
4874 /* Only boolean on/off is supported */
4875 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4876 return mgmt_cmd_status(sk, hdev->id,
4877 MGMT_OP_SET_EXP_FEATURE,
4878 MGMT_STATUS_INVALID_PARAMS);
4879
4880 val = !!cp->param[0];
4881 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4882
4883 if (!hdev->get_data_path_id) {
4884 return mgmt_cmd_status(sk, hdev->id,
4885 MGMT_OP_SET_EXP_FEATURE,
4886 MGMT_STATUS_NOT_SUPPORTED);
4887 }
4888
4889 if (changed) {
4890 if (val)
4891 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4892 else
4893 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4894 }
4895
4896 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4897 val, changed);
4898
4899 memcpy(rp.uuid, offload_codecs_uuid, 16);
4900 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4901 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4902 err = mgmt_cmd_complete(sk, hdev->id,
4903 MGMT_OP_SET_EXP_FEATURE, 0,
4904 &rp, sizeof(rp));
4905
4906 if (changed)
4907 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4908
4909 return err;
4910 }
4911
4912 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4913 struct mgmt_cp_set_exp_feature *cp,
4914 u16 data_len)
4915 {
4916 bool val, changed;
4917 int err;
4918 struct mgmt_rp_set_exp_feature rp;
4919
4920 /* This command requires a valid controller index */
4921 if (!hdev)
4922 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4923 MGMT_OP_SET_EXP_FEATURE,
4924 MGMT_STATUS_INVALID_INDEX);
4925
4926 /* Parameters are limited to a single octet */
4927 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4928 return mgmt_cmd_status(sk, hdev->id,
4929 MGMT_OP_SET_EXP_FEATURE,
4930 MGMT_STATUS_INVALID_PARAMS);
4931
4932 /* Only boolean on/off is supported */
4933 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4934 return mgmt_cmd_status(sk, hdev->id,
4935 MGMT_OP_SET_EXP_FEATURE,
4936 MGMT_STATUS_INVALID_PARAMS);
4937
4938 val = !!cp->param[0];
4939 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4940
4941 if (!hci_dev_le_state_simultaneous(hdev)) {
4942 return mgmt_cmd_status(sk, hdev->id,
4943 MGMT_OP_SET_EXP_FEATURE,
4944 MGMT_STATUS_NOT_SUPPORTED);
4945 }
4946
4947 if (changed) {
4948 if (val)
4949 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4950 else
4951 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4952 }
4953
4954 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4955 val, changed);
4956
4957 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4958 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4959 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4960 err = mgmt_cmd_complete(sk, hdev->id,
4961 MGMT_OP_SET_EXP_FEATURE, 0,
4962 &rp, sizeof(rp));
4963
4964 if (changed)
4965 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4966
4967 return err;
4968 }
4969
4970 #ifdef CONFIG_BT_LE
4971 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4972 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4973 {
4974 struct mgmt_rp_set_exp_feature rp;
4975 bool val, changed = false;
4976 int err;
4977
4978 /* This command requires the non-controller index */
4979 if (hdev)
4980 return mgmt_cmd_status(sk, hdev->id,
4981 MGMT_OP_SET_EXP_FEATURE,
4982 MGMT_STATUS_INVALID_INDEX);
4983
4984 /* Parameters are limited to a single octet */
4985 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4986 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4987 MGMT_OP_SET_EXP_FEATURE,
4988 MGMT_STATUS_INVALID_PARAMS);
4989
4990 /* Only boolean on/off is supported */
4991 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4992 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4993 MGMT_OP_SET_EXP_FEATURE,
4994 MGMT_STATUS_INVALID_PARAMS);
4995
4996 val = !!cp->param[0];
4997 if (val)
4998 err = iso_init();
4999 else
5000 err = iso_exit();
5001
5002 if (!err)
5003 changed = true;
5004
5005 memcpy(rp.uuid, iso_socket_uuid, 16);
5006 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
5007
5008 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
5009
5010 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
5011 MGMT_OP_SET_EXP_FEATURE, 0,
5012 &rp, sizeof(rp));
5013
5014 if (changed)
5015 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
5016
5017 return err;
5018 }
5019 #endif
5020
5021 static const struct mgmt_exp_feature {
5022 const u8 *uuid;
5023 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
5024 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
5025 } exp_features[] = {
5026 EXP_FEAT(ZERO_KEY, set_zero_key_func),
5027 #ifdef CONFIG_BT_FEATURE_DEBUG
5028 EXP_FEAT(debug_uuid, set_debug_func),
5029 #endif
5030 EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
5031 EXP_FEAT(quality_report_uuid, set_quality_report_func),
5032 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
5033 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
5034 #ifdef CONFIG_BT_LE
5035 EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
5036 #endif
5037
5038 /* end with a null feature */
5039 EXP_FEAT(NULL, NULL)
5040 };
5041
5042 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
5043 void *data, u16 data_len)
5044 {
5045 struct mgmt_cp_set_exp_feature *cp = data;
5046 size_t i = 0;
5047
5048 bt_dev_dbg(hdev, "sock %p", sk);
5049
5050 for (i = 0; exp_features[i].uuid; i++) {
5051 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
5052 return exp_features[i].set_func(sk, hdev, cp, data_len);
5053 }
5054
5055 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
5056 MGMT_OP_SET_EXP_FEATURE,
5057 MGMT_STATUS_NOT_SUPPORTED);
5058 }
5059
5060 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5061 u16 data_len)
5062 {
5063 struct mgmt_cp_get_device_flags *cp = data;
5064 struct mgmt_rp_get_device_flags rp;
5065 struct bdaddr_list_with_flags *br_params;
5066 struct hci_conn_params *params;
5067 u32 supported_flags;
5068 u32 current_flags = 0;
5069 u8 status = MGMT_STATUS_INVALID_PARAMS;
5070
5071 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
5072 &cp->addr.bdaddr, cp->addr.type);
5073
5074 hci_dev_lock(hdev);
5075
5076 supported_flags = hdev->conn_flags;
5077
5078 memset(&rp, 0, sizeof(rp));
5079
5080 if (cp->addr.type == BDADDR_BREDR) {
5081 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5082 &cp->addr.bdaddr,
5083 cp->addr.type);
5084 if (!br_params)
5085 goto done;
5086
5087 current_flags = br_params->flags;
5088 } else {
5089 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5090 le_addr_type(cp->addr.type));
5091 if (!params)
5092 goto done;
5093
5094 current_flags = params->flags;
5095 }
5096
5097 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5098 rp.addr.type = cp->addr.type;
5099 rp.supported_flags = cpu_to_le32(supported_flags);
5100 rp.current_flags = cpu_to_le32(current_flags);
5101
5102 status = MGMT_STATUS_SUCCESS;
5103
5104 done:
5105 hci_dev_unlock(hdev);
5106
5107 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5108 &rp, sizeof(rp));
5109 }
5110
5111 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5112 bdaddr_t *bdaddr, u8 bdaddr_type,
5113 u32 supported_flags, u32 current_flags)
5114 {
5115 struct mgmt_ev_device_flags_changed ev;
5116
5117 bacpy(&ev.addr.bdaddr, bdaddr);
5118 ev.addr.type = bdaddr_type;
5119 ev.supported_flags = cpu_to_le32(supported_flags);
5120 ev.current_flags = cpu_to_le32(current_flags);
5121
5122 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5123 }
5124
5125 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5126 {
5127 struct hci_conn *conn;
5128
5129 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5130 if (!conn)
5131 return false;
5132
5133 if (conn->dst_type != type)
5134 return false;
5135
5136 if (conn->state != BT_CONNECTED)
5137 return false;
5138
5139 return true;
5140 }
5141
5142 /* This function requires the caller holds hdev->lock */
5143 static struct hci_conn_params *hci_conn_params_set(struct hci_dev *hdev,
5144 bdaddr_t *addr, u8 addr_type,
5145 u8 auto_connect)
5146 {
5147 struct hci_conn_params *params;
5148
5149 params = hci_conn_params_add(hdev, addr, addr_type);
5150 if (!params)
5151 return NULL;
5152
5153 if (params->auto_connect == auto_connect)
5154 return params;
5155
5156 hci_pend_le_list_del_init(params);
5157
5158 switch (auto_connect) {
5159 case HCI_AUTO_CONN_DISABLED:
5160 case HCI_AUTO_CONN_LINK_LOSS:
5161 /* If auto connect is being disabled when we're trying to
5162 * connect to the device, keep connecting.
5163 */
5164 if (params->explicit_connect)
5165 hci_pend_le_list_add(params, &hdev->pend_le_conns);
5166 break;
5167 case HCI_AUTO_CONN_REPORT:
5168 if (params->explicit_connect)
5169 hci_pend_le_list_add(params, &hdev->pend_le_conns);
5170 else
5171 hci_pend_le_list_add(params, &hdev->pend_le_reports);
5172 break;
5173 case HCI_AUTO_CONN_DIRECT:
5174 case HCI_AUTO_CONN_ALWAYS:
5175 if (!is_connected(hdev, addr, addr_type))
5176 hci_pend_le_list_add(params, &hdev->pend_le_conns);
5177 break;
5178 }
5179
5180 params->auto_connect = auto_connect;
5181
5182 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
5183 addr, addr_type, auto_connect);
5184
5185 return params;
5186 }
5187
5188 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5189 u16 len)
5190 {
5191 struct mgmt_cp_set_device_flags *cp = data;
5192 struct bdaddr_list_with_flags *br_params;
5193 struct hci_conn_params *params;
5194 u8 status = MGMT_STATUS_INVALID_PARAMS;
5195 u32 supported_flags;
5196 u32 current_flags = __le32_to_cpu(cp->current_flags);
5197
5198 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5199 &cp->addr.bdaddr, cp->addr.type, current_flags);
5200
5201 /* FIXME: take hci_dev_lock() before this read, since conn_flags can change. */
5202 supported_flags = hdev->conn_flags;
5203
5204 if ((supported_flags | current_flags) != supported_flags) {
5205 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5206 current_flags, supported_flags);
5207 goto done;
5208 }
5209
5210 hci_dev_lock(hdev);
5211
5212 if (cp->addr.type == BDADDR_BREDR) {
5213 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5214 &cp->addr.bdaddr,
5215 cp->addr.type);
5216
5217 if (br_params) {
5218 br_params->flags = current_flags;
5219 status = MGMT_STATUS_SUCCESS;
5220 } else {
5221 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5222 &cp->addr.bdaddr, cp->addr.type);
5223 }
5224
5225 goto unlock;
5226 }
5227
5228 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5229 le_addr_type(cp->addr.type));
5230 if (!params) {
5231 /* Create a new hci_conn_params if it doesn't exist */
5232 params = hci_conn_params_set(hdev, &cp->addr.bdaddr,
5233 le_addr_type(cp->addr.type),
5234 HCI_AUTO_CONN_DISABLED);
5235 if (!params) {
5236 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5237 &cp->addr.bdaddr,
5238 le_addr_type(cp->addr.type));
5239 goto unlock;
5240 }
5241 }
5242
5243 supported_flags = hdev->conn_flags;
5244
5245 if ((supported_flags | current_flags) != supported_flags) {
5246 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5247 current_flags, supported_flags);
5248 goto unlock;
5249 }
5250
5251 WRITE_ONCE(params->flags, current_flags);
5252 status = MGMT_STATUS_SUCCESS;
5253
5254 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5255 * has been set.
5256 */
5257 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5258 hci_update_passive_scan(hdev);
5259
5260 unlock:
5261 hci_dev_unlock(hdev);
5262
5263 done:
5264 if (status == MGMT_STATUS_SUCCESS)
5265 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5266 supported_flags, current_flags);
5267
5268 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5269 &cp->addr, sizeof(cp->addr));
5270 }
5271
5272 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5273 u16 handle)
5274 {
5275 struct mgmt_ev_adv_monitor_added ev;
5276
5277 ev.monitor_handle = cpu_to_le16(handle);
5278
5279 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5280 }
5281
5282 static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
5283 __le16 handle)
5284 {
5285 struct mgmt_ev_adv_monitor_removed ev;
5286
5287 ev.monitor_handle = handle;
5288
5289 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
5290 }
5291
5292 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5293 void *data, u16 len)
5294 {
5295 struct adv_monitor *monitor = NULL;
5296 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5297 int handle, err;
5298 size_t rp_size = 0;
5299 __u32 supported = 0;
5300 __u32 enabled = 0;
5301 __u16 num_handles = 0;
5302 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5303
5304 BT_DBG("request for %s", hdev->name);
5305
5306 hci_dev_lock(hdev);
5307
5308 if (msft_monitor_supported(hdev))
5309 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5310
5311 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5312 handles[num_handles++] = monitor->handle;
5313
5314 hci_dev_unlock(hdev);
5315
5316 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5317 rp = kmalloc(rp_size, GFP_KERNEL);
5318 if (!rp)
5319 return -ENOMEM;
5320
5321 /* All supported features are currently enabled */
5322 enabled = supported;
5323
5324 rp->supported_features = cpu_to_le32(supported);
5325 rp->enabled_features = cpu_to_le32(enabled);
5326 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5327 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5328 rp->num_handles = cpu_to_le16(num_handles);
5329 if (num_handles)
5330 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5331
5332 err = mgmt_cmd_complete(sk, hdev->id,
5333 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5334 MGMT_STATUS_SUCCESS, rp, rp_size);
5335
5336 kfree(rp);
5337
5338 return err;
5339 }
5340
5341 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5342 void *data, int status)
5343 {
5344 struct mgmt_rp_add_adv_patterns_monitor rp;
5345 struct mgmt_pending_cmd *cmd = data;
5346 struct adv_monitor *monitor;
5347
5348 /* This is likely the result of hdev being closed and mgmt_index_removed
5349 * attempting to clean up any pending command, so
5350 * hci_adv_monitors_clear is about to be called and will take care of
5351 * freeing the adv_monitor instances.
5352 */
5353 if (status == -ECANCELED && !mgmt_pending_valid(hdev, cmd))
5354 return;
5355
5356 monitor = cmd->user_data;
5357
5358 hci_dev_lock(hdev);
5359
5360 rp.monitor_handle = cpu_to_le16(monitor->handle);
5361
5362 if (!status) {
5363 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5364 hdev->adv_monitors_cnt++;
5365 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5366 monitor->state = ADV_MONITOR_STATE_REGISTERED;
5367 hci_update_passive_scan(hdev);
5368 }
5369
5370 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
5371 mgmt_status(status), &rp, sizeof(rp));
5372 mgmt_pending_remove(cmd);
5373
5374 hci_dev_unlock(hdev);
5375 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5376 rp.monitor_handle, status);
5377 }
5378
5379 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5380 {
5381 struct mgmt_pending_cmd *cmd = data;
5382 struct adv_monitor *mon;
5383
5384 mutex_lock(&hdev->mgmt_pending_lock);
5385
5386 if (!__mgmt_pending_listed(hdev, cmd)) {
5387 mutex_unlock(&hdev->mgmt_pending_lock);
5388 return -ECANCELED;
5389 }
5390
5391 mon = cmd->user_data;
5392
5393 mutex_unlock(&hdev->mgmt_pending_lock);
5394
5395 return hci_add_adv_monitor(hdev, mon);
5396 }
5397
5398 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5399 struct adv_monitor *m, u8 status,
5400 void *data, u16 len, u16 op)
5401 {
5402 struct mgmt_pending_cmd *cmd;
5403 int err;
5404
5405 hci_dev_lock(hdev);
5406
5407 if (status)
5408 goto unlock;
5409
5410 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5411 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5412 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5413 status = MGMT_STATUS_BUSY;
5414 goto unlock;
5415 }
5416
5417 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5418 if (!cmd) {
5419 status = MGMT_STATUS_NO_RESOURCES;
5420 goto unlock;
5421 }
5422
5423 cmd->user_data = m;
5424 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5425 mgmt_add_adv_patterns_monitor_complete);
5426 if (err) {
5427 if (err == -ENOMEM)
5428 status = MGMT_STATUS_NO_RESOURCES;
5429 else
5430 status = MGMT_STATUS_FAILED;
5431
5432 goto unlock;
5433 }
5434
5435 hci_dev_unlock(hdev);
5436
5437 return 0;
5438
5439 unlock:
5440 hci_free_adv_monitor(hdev, m);
5441 hci_dev_unlock(hdev);
5442 return mgmt_cmd_status(sk, hdev->id, op, status);
5443 }
5444
5445 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5446 struct mgmt_adv_rssi_thresholds *rssi)
5447 {
5448 if (rssi) {
5449 m->rssi.low_threshold = rssi->low_threshold;
5450 m->rssi.low_threshold_timeout =
5451 __le16_to_cpu(rssi->low_threshold_timeout);
5452 m->rssi.high_threshold = rssi->high_threshold;
5453 m->rssi.high_threshold_timeout =
5454 __le16_to_cpu(rssi->high_threshold_timeout);
5455 m->rssi.sampling_period = rssi->sampling_period;
5456 } else {
5457 /* Default values. These numbers are the least constraining
5458 * parameters for the MSFT API to work, so it behaves as if
5459 * there are no RSSI parameters to consider. They may need to
5460 * change if other APIs are to be supported.
5461 */
5462 m->rssi.low_threshold = -127;
5463 m->rssi.low_threshold_timeout = 60;
5464 m->rssi.high_threshold = -127;
5465 m->rssi.high_threshold_timeout = 0;
5466 m->rssi.sampling_period = 0;
5467 }
5468 }
5469
5470 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5471 struct mgmt_adv_pattern *patterns)
5472 {
5473 u8 offset = 0, length = 0;
5474 struct adv_pattern *p = NULL;
5475 int i;
5476
5477 for (i = 0; i < pattern_count; i++) {
5478 offset = patterns[i].offset;
5479 length = patterns[i].length;
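/* Each pattern must fit entirely within the 31-byte legacy advertising
 * payload, so the offset, the length and their sum are all checked
 * against HCI_MAX_AD_LENGTH.
 */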
5480 if (offset >= HCI_MAX_AD_LENGTH ||
5481 length > HCI_MAX_AD_LENGTH ||
5482 (offset + length) > HCI_MAX_AD_LENGTH)
5483 return MGMT_STATUS_INVALID_PARAMS;
5484
5485 p = kmalloc(sizeof(*p), GFP_KERNEL);
5486 if (!p)
5487 return MGMT_STATUS_NO_RESOURCES;
5488
5489 p->ad_type = patterns[i].ad_type;
5490 p->offset = patterns[i].offset;
5491 p->length = patterns[i].length;
5492 memcpy(p->value, patterns[i].value, p->length);
5493
5494 INIT_LIST_HEAD(&p->list);
5495 list_add(&p->list, &m->patterns);
5496 }
5497
5498 return MGMT_STATUS_SUCCESS;
5499 }
5500
5501 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5502 void *data, u16 len)
5503 {
5504 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5505 struct adv_monitor *m = NULL;
5506 u8 status = MGMT_STATUS_SUCCESS;
5507 size_t expected_size = sizeof(*cp);
5508
5509 BT_DBG("request for %s", hdev->name);
5510
5511 if (len <= sizeof(*cp)) {
5512 status = MGMT_STATUS_INVALID_PARAMS;
5513 goto done;
5514 }
5515
5516 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5517 if (len != expected_size) {
5518 status = MGMT_STATUS_INVALID_PARAMS;
5519 goto done;
5520 }
5521
5522 m = kzalloc(sizeof(*m), GFP_KERNEL);
5523 if (!m) {
5524 status = MGMT_STATUS_NO_RESOURCES;
5525 goto done;
5526 }
5527
5528 INIT_LIST_HEAD(&m->patterns);
5529
5530 parse_adv_monitor_rssi(m, NULL);
5531 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5532
5533 done:
5534 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5535 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5536 }
5537
5538 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5539 void *data, u16 len)
5540 {
5541 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5542 struct adv_monitor *m = NULL;
5543 u8 status = MGMT_STATUS_SUCCESS;
5544 size_t expected_size = sizeof(*cp);
5545
5546 BT_DBG("request for %s", hdev->name);
5547
5548 if (len <= sizeof(*cp)) {
5549 status = MGMT_STATUS_INVALID_PARAMS;
5550 goto done;
5551 }
5552
5553 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5554 if (len != expected_size) {
5555 status = MGMT_STATUS_INVALID_PARAMS;
5556 goto done;
5557 }
5558
5559 m = kzalloc(sizeof(*m), GFP_KERNEL);
5560 if (!m) {
5561 status = MGMT_STATUS_NO_RESOURCES;
5562 goto done;
5563 }
5564
5565 INIT_LIST_HEAD(&m->patterns);
5566
5567 parse_adv_monitor_rssi(m, &cp->rssi);
5568 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5569
5570 done:
5571 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5572 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5573 }
5574
5575 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5576 void *data, int status)
5577 {
5578 struct mgmt_rp_remove_adv_monitor rp;
5579 struct mgmt_pending_cmd *cmd = data;
5580 struct mgmt_cp_remove_adv_monitor *cp;
5581
5582 if (status == -ECANCELED)
5583 return;
5584
5585 hci_dev_lock(hdev);
5586
5587 cp = cmd->param;
5588
5589 rp.monitor_handle = cp->monitor_handle;
5590
5591 if (!status) {
5592 mgmt_adv_monitor_removed(cmd->sk, hdev, cp->monitor_handle);
5593 hci_update_passive_scan(hdev);
5594 }
5595
5596 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
5597 mgmt_status(status), &rp, sizeof(rp));
5598 mgmt_pending_free(cmd);
5599
5600 hci_dev_unlock(hdev);
5601 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5602 rp.monitor_handle, status);
5603 }
5604
5605 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5606 {
5607 struct mgmt_pending_cmd *cmd = data;
5608 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5609 u16 handle = __le16_to_cpu(cp->monitor_handle);
5610
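/* Handle 0 is a wildcard: it removes every registered monitor rather
 * than a single one.
 */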
5611 if (!handle)
5612 return hci_remove_all_adv_monitor(hdev);
5613
5614 return hci_remove_single_adv_monitor(hdev, handle);
5615 }
5616
5617 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5618 void *data, u16 len)
5619 {
5620 struct mgmt_pending_cmd *cmd;
5621 int err, status;
5622
5623 hci_dev_lock(hdev);
5624
5625 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5626 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5627 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5628 status = MGMT_STATUS_BUSY;
5629 goto unlock;
5630 }
5631
5632 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5633 if (!cmd) {
5634 status = MGMT_STATUS_NO_RESOURCES;
5635 goto unlock;
5636 }
5637
5638 err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
5639 mgmt_remove_adv_monitor_complete);
5640
5641 if (err) {
5642 mgmt_pending_free(cmd);
5643
5644 if (err == -ENOMEM)
5645 status = MGMT_STATUS_NO_RESOURCES;
5646 else
5647 status = MGMT_STATUS_FAILED;
5648
5649 goto unlock;
5650 }
5651
5652 hci_dev_unlock(hdev);
5653
5654 return 0;
5655
5656 unlock:
5657 hci_dev_unlock(hdev);
5658 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5659 status);
5660 }
5661
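/* Completion handler for Read Local OOB Data: unwrap the HCI reply skb and
 * translate it into a mgmt response. The skb may be NULL or an ERR_PTR, so
 * both cases are folded into an error status first.
 */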
5662 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data,
5663 int err)
5664 {
5665 struct mgmt_rp_read_local_oob_data mgmt_rp;
5666 size_t rp_size = sizeof(mgmt_rp);
5667 struct mgmt_pending_cmd *cmd = data;
5668 struct sk_buff *skb = cmd->skb;
5669 u8 status = mgmt_status(err);
5670
5671 if (!status) {
5672 if (!skb)
5673 status = MGMT_STATUS_FAILED;
5674 else if (IS_ERR(skb))
5675 status = mgmt_status(PTR_ERR(skb));
5676 else
5677 status = mgmt_status(skb->data[0]);
5678 }
5679
5680 bt_dev_dbg(hdev, "status %d", status);
5681
5682 if (status) {
5683 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5684 status);
5685 goto remove;
5686 }
5687
5688 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5689
5690 if (!bredr_sc_enabled(hdev)) {
5691 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5692
5693 if (skb->len < sizeof(*rp)) {
5694 mgmt_cmd_status(cmd->sk, hdev->id,
5695 MGMT_OP_READ_LOCAL_OOB_DATA,
5696 MGMT_STATUS_FAILED);
5697 goto remove;
5698 }
5699
5700 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5701 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5702
5703 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5704 } else {
5705 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5706
5707 if (skb->len < sizeof(*rp)) {
5708 mgmt_cmd_status(cmd->sk, hdev->id,
5709 MGMT_OP_READ_LOCAL_OOB_DATA,
5710 MGMT_STATUS_FAILED);
5711 goto remove;
5712 }
5713
5714 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5715 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5716
5717 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5718 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5719 }
5720
5721 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5722 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5723
5724 remove:
5725 if (skb && !IS_ERR(skb))
5726 kfree_skb(skb);
5727
5728 mgmt_pending_free(cmd);
5729 }
5730
5731 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5732 {
5733 struct mgmt_pending_cmd *cmd = data;
5734
5735 if (bredr_sc_enabled(hdev))
5736 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5737 else
5738 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5739
5740 if (IS_ERR(cmd->skb))
5741 return PTR_ERR(cmd->skb);
5742 else
5743 return 0;
5744 }
5745
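/* Read Local OOB Data is only valid on a powered controller that is SSP
 * capable; both conditions are checked before the HCI command is queued.
 */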
5746 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5747 void *data, u16 data_len)
5748 {
5749 struct mgmt_pending_cmd *cmd;
5750 int err;
5751
5752 bt_dev_dbg(hdev, "sock %p", sk);
5753
5754 hci_dev_lock(hdev);
5755
5756 if (!hdev_is_powered(hdev)) {
5757 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5758 MGMT_STATUS_NOT_POWERED);
5759 goto unlock;
5760 }
5761
5762 if (!lmp_ssp_capable(hdev)) {
5763 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5764 MGMT_STATUS_NOT_SUPPORTED);
5765 goto unlock;
5766 }
5767
5768 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5769 if (!cmd)
5770 err = -ENOMEM;
5771 else
5772 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5773 read_local_oob_data_complete);
5774
5775 if (err < 0) {
5776 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5777 MGMT_STATUS_FAILED);
5778
5779 if (cmd)
5780 mgmt_pending_free(cmd);
5781 }
5782
5783 unlock:
5784 hci_dev_unlock(hdev);
5785 return err;
5786 }
5787
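/* Add Remote OOB Data accepts exactly two sizes: the legacy form carrying
 * only the P-192 hash/randomizer, and the extended form carrying both P-192
 * and P-256 values. Any other length is rejected as invalid parameters.
 */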
5788 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5789 void *data, u16 len)
5790 {
5791 struct mgmt_addr_info *addr = data;
5792 int err;
5793
5794 bt_dev_dbg(hdev, "sock %p", sk);
5795
5796 if (!bdaddr_type_is_valid(addr->type))
5797 return mgmt_cmd_complete(sk, hdev->id,
5798 MGMT_OP_ADD_REMOTE_OOB_DATA,
5799 MGMT_STATUS_INVALID_PARAMS,
5800 addr, sizeof(*addr));
5801
5802 hci_dev_lock(hdev);
5803
5804 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5805 struct mgmt_cp_add_remote_oob_data *cp = data;
5806 u8 status;
5807
5808 if (cp->addr.type != BDADDR_BREDR) {
5809 err = mgmt_cmd_complete(sk, hdev->id,
5810 MGMT_OP_ADD_REMOTE_OOB_DATA,
5811 MGMT_STATUS_INVALID_PARAMS,
5812 &cp->addr, sizeof(cp->addr));
5813 goto unlock;
5814 }
5815
5816 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5817 cp->addr.type, cp->hash,
5818 cp->rand, NULL, NULL);
5819 if (err < 0)
5820 status = MGMT_STATUS_FAILED;
5821 else
5822 status = MGMT_STATUS_SUCCESS;
5823
5824 err = mgmt_cmd_complete(sk, hdev->id,
5825 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5826 &cp->addr, sizeof(cp->addr));
5827 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5828 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5829 u8 *rand192, *hash192, *rand256, *hash256;
5830 u8 status;
5831
5832 if (bdaddr_type_is_le(cp->addr.type)) {
5833 /* Enforce zero-valued 192-bit parameters as
5834 * long as legacy SMP OOB isn't implemented.
5835 */
5836 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5837 memcmp(cp->hash192, ZERO_KEY, 16)) {
5838 err = mgmt_cmd_complete(sk, hdev->id,
5839 MGMT_OP_ADD_REMOTE_OOB_DATA,
5840 MGMT_STATUS_INVALID_PARAMS,
5841 addr, sizeof(*addr));
5842 goto unlock;
5843 }
5844
5845 rand192 = NULL;
5846 hash192 = NULL;
5847 } else {
5848 /* If one of the P-192 values is set to zero,
5849 * just disable OOB data for P-192.
5850 */
5851 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5852 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5853 rand192 = NULL;
5854 hash192 = NULL;
5855 } else {
5856 rand192 = cp->rand192;
5857 hash192 = cp->hash192;
5858 }
5859 }
5860
5861 /* If one of the P-256 values is set to zero, just
5862 * disable OOB data for P-256.
5863 */
5864 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5865 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5866 rand256 = NULL;
5867 hash256 = NULL;
5868 } else {
5869 rand256 = cp->rand256;
5870 hash256 = cp->hash256;
5871 }
5872
5873 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5874 cp->addr.type, hash192, rand192,
5875 hash256, rand256);
5876 if (err < 0)
5877 status = MGMT_STATUS_FAILED;
5878 else
5879 status = MGMT_STATUS_SUCCESS;
5880
5881 err = mgmt_cmd_complete(sk, hdev->id,
5882 MGMT_OP_ADD_REMOTE_OOB_DATA,
5883 status, &cp->addr, sizeof(cp->addr));
5884 } else {
5885 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5886 len);
5887 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5888 MGMT_STATUS_INVALID_PARAMS);
5889 }
5890
5891 unlock:
5892 hci_dev_unlock(hdev);
5893 return err;
5894 }
5895
5896 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5897 void *data, u16 len)
5898 {
5899 struct mgmt_cp_remove_remote_oob_data *cp = data;
5900 u8 status;
5901 int err;
5902
5903 bt_dev_dbg(hdev, "sock %p", sk);
5904
5905 if (cp->addr.type != BDADDR_BREDR)
5906 return mgmt_cmd_complete(sk, hdev->id,
5907 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5908 MGMT_STATUS_INVALID_PARAMS,
5909 &cp->addr, sizeof(cp->addr));
5910
5911 hci_dev_lock(hdev);
5912
5913 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5914 hci_remote_oob_data_clear(hdev);
5915 status = MGMT_STATUS_SUCCESS;
5916 goto done;
5917 }
5918
5919 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5920 if (err < 0)
5921 status = MGMT_STATUS_INVALID_PARAMS;
5922 else
5923 status = MGMT_STATUS_SUCCESS;
5924
5925 done:
5926 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5927 status, &cp->addr, sizeof(cp->addr));
5928
5929 hci_dev_unlock(hdev);
5930 return err;
5931 }
5932
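/* Map a discovery type to the controller support it requires: LE-only needs
 * LE support, BR/EDR-only needs BR/EDR support, and interleaved discovery
 * needs both (note the fallthrough below).
 */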
5933 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5934 uint8_t *mgmt_status)
5935 {
5936 switch (type) {
5937 case DISCOV_TYPE_LE:
5938 *mgmt_status = mgmt_le_support(hdev);
5939 if (*mgmt_status)
5940 return false;
5941 break;
5942 case DISCOV_TYPE_INTERLEAVED:
5943 *mgmt_status = mgmt_le_support(hdev);
5944 if (*mgmt_status)
5945 return false;
5946 fallthrough;
5947 case DISCOV_TYPE_BREDR:
5948 *mgmt_status = mgmt_bredr_support(hdev);
5949 if (*mgmt_status)
5950 return false;
5951 break;
5952 default:
5953 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5954 return false;
5955 }
5956
5957 return true;
5958 }
5959
5960 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5961 {
5962 struct mgmt_pending_cmd *cmd = data;
5963
5964 bt_dev_dbg(hdev, "err %d", err);
5965
5966 if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
5967 return;
5968
5969 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
5970 cmd->param, 1);
5971 mgmt_pending_free(cmd);
5972
5973 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
5974 DISCOVERY_FINDING);
5975 }
5976
5977 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5978 {
5979 if (!mgmt_pending_listed(hdev, data))
5980 return -ECANCELED;
5981
5982 return hci_start_discovery_sync(hdev);
5983 }
5984
5985 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5986 u16 op, void *data, u16 len)
5987 {
5988 struct mgmt_cp_start_discovery *cp = data;
5989 struct mgmt_pending_cmd *cmd;
5990 u8 status;
5991 int err;
5992
5993 bt_dev_dbg(hdev, "sock %p", sk);
5994
5995 hci_dev_lock(hdev);
5996
5997 if (!hdev_is_powered(hdev)) {
5998 err = mgmt_cmd_complete(sk, hdev->id, op,
5999 MGMT_STATUS_NOT_POWERED,
6000 &cp->type, sizeof(cp->type));
6001 goto failed;
6002 }
6003
6004 if (hdev->discovery.state != DISCOVERY_STOPPED ||
6005 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
6006 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
6007 &cp->type, sizeof(cp->type));
6008 goto failed;
6009 }
6010
6011 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6012 err = mgmt_cmd_complete(sk, hdev->id, op, status,
6013 &cp->type, sizeof(cp->type));
6014 goto failed;
6015 }
6016
6017 /* Can't start discovery when it is paused */
6018 if (hdev->discovery_paused) {
6019 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
6020 &cp->type, sizeof(cp->type));
6021 goto failed;
6022 }
6023
6024 /* Clear the discovery filter first to free any previously
6025 * allocated memory for the UUID list.
6026 */
6027 hci_discovery_filter_clear(hdev);
6028
6029 hdev->discovery.type = cp->type;
6030 hdev->discovery.report_invalid_rssi = false;
6031 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
6032 hdev->discovery.limited = true;
6033 else
6034 hdev->discovery.limited = false;
6035
6036 cmd = mgmt_pending_add(sk, op, hdev, data, len);
6037 if (!cmd) {
6038 err = -ENOMEM;
6039 goto failed;
6040 }
6041
6042 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6043 start_discovery_complete);
6044 if (err < 0) {
6045 mgmt_pending_remove(cmd);
6046 goto failed;
6047 }
6048
6049 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6050
6051 failed:
6052 hci_dev_unlock(hdev);
6053 return err;
6054 }
6055
6056 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
6057 void *data, u16 len)
6058 {
6059 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
6060 data, len);
6061 }
6062
6063 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
6064 void *data, u16 len)
6065 {
6066 return start_discovery_internal(sk, hdev,
6067 MGMT_OP_START_LIMITED_DISCOVERY,
6068 data, len);
6069 }
6070
6071 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
6072 void *data, u16 len)
6073 {
6074 struct mgmt_cp_start_service_discovery *cp = data;
6075 struct mgmt_pending_cmd *cmd;
6076 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
6077 u16 uuid_count, expected_len;
6078 u8 status;
6079 int err;
6080
6081 bt_dev_dbg(hdev, "sock %p", sk);
6082
6083 hci_dev_lock(hdev);
6084
6085 if (!hdev_is_powered(hdev)) {
6086 err = mgmt_cmd_complete(sk, hdev->id,
6087 MGMT_OP_START_SERVICE_DISCOVERY,
6088 MGMT_STATUS_NOT_POWERED,
6089 &cp->type, sizeof(cp->type));
6090 goto failed;
6091 }
6092
6093 if (hdev->discovery.state != DISCOVERY_STOPPED ||
6094 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
6095 err = mgmt_cmd_complete(sk, hdev->id,
6096 MGMT_OP_START_SERVICE_DISCOVERY,
6097 MGMT_STATUS_BUSY, &cp->type,
6098 sizeof(cp->type));
6099 goto failed;
6100 }
6101
6102 if (hdev->discovery_paused) {
6103 err = mgmt_cmd_complete(sk, hdev->id,
6104 MGMT_OP_START_SERVICE_DISCOVERY,
6105 MGMT_STATUS_BUSY, &cp->type,
6106 sizeof(cp->type));
6107 goto failed;
6108 }
6109
6110 uuid_count = __le16_to_cpu(cp->uuid_count);
6111 if (uuid_count > max_uuid_count) {
6112 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
6113 uuid_count);
6114 err = mgmt_cmd_complete(sk, hdev->id,
6115 MGMT_OP_START_SERVICE_DISCOVERY,
6116 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6117 sizeof(cp->type));
6118 goto failed;
6119 }
6120
6121 expected_len = sizeof(*cp) + uuid_count * 16;
6122 if (expected_len != len) {
6123 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
6124 expected_len, len);
6125 err = mgmt_cmd_complete(sk, hdev->id,
6126 MGMT_OP_START_SERVICE_DISCOVERY,
6127 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6128 sizeof(cp->type));
6129 goto failed;
6130 }
6131
6132 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6133 err = mgmt_cmd_complete(sk, hdev->id,
6134 MGMT_OP_START_SERVICE_DISCOVERY,
6135 status, &cp->type, sizeof(cp->type));
6136 goto failed;
6137 }
6138
6139 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6140 hdev, data, len);
6141 if (!cmd) {
6142 err = -ENOMEM;
6143 goto failed;
6144 }
6145
6146 /* Clear the discovery filter first to free any previously
6147 * allocated memory for the UUID list.
6148 */
6149 hci_discovery_filter_clear(hdev);
6150
6151 hdev->discovery.result_filtering = true;
6152 hdev->discovery.type = cp->type;
6153 hdev->discovery.rssi = cp->rssi;
6154 hdev->discovery.uuid_count = uuid_count;
6155
6156 if (uuid_count > 0) {
6157 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6158 GFP_KERNEL);
6159 if (!hdev->discovery.uuids) {
6160 err = mgmt_cmd_complete(sk, hdev->id,
6161 MGMT_OP_START_SERVICE_DISCOVERY,
6162 MGMT_STATUS_FAILED,
6163 &cp->type, sizeof(cp->type));
6164 mgmt_pending_remove(cmd);
6165 goto failed;
6166 }
6167 }
6168
6169 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6170 start_discovery_complete);
6171 if (err < 0) {
6172 mgmt_pending_remove(cmd);
6173 goto failed;
6174 }
6175
6176 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6177
6178 failed:
6179 hci_dev_unlock(hdev);
6180 return err;
6181 }
6182
6183 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6184 {
6185 struct mgmt_pending_cmd *cmd = data;
6186
6187 if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
6188 return;
6189
6190 bt_dev_dbg(hdev, "err %d", err);
6191
6192 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
6193 cmd->param, 1);
6194 mgmt_pending_free(cmd);
6195
6196 if (!err)
6197 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6198 }
6199
6200 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6201 {
6202 if (!mgmt_pending_listed(hdev, data))
6203 return -ECANCELED;
6204
6205 return hci_stop_discovery_sync(hdev);
6206 }
6207
6208 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6209 u16 len)
6210 {
6211 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6212 struct mgmt_pending_cmd *cmd;
6213 int err;
6214
6215 bt_dev_dbg(hdev, "sock %p", sk);
6216
6217 hci_dev_lock(hdev);
6218
6219 if (!hci_discovery_active(hdev)) {
6220 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6221 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6222 sizeof(mgmt_cp->type));
6223 goto unlock;
6224 }
6225
6226 if (hdev->discovery.type != mgmt_cp->type) {
6227 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6228 MGMT_STATUS_INVALID_PARAMS,
6229 &mgmt_cp->type, sizeof(mgmt_cp->type));
6230 goto unlock;
6231 }
6232
6233 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6234 if (!cmd) {
6235 err = -ENOMEM;
6236 goto unlock;
6237 }
6238
6239 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6240 stop_discovery_complete);
6241 if (err < 0) {
6242 mgmt_pending_remove(cmd);
6243 goto unlock;
6244 }
6245
6246 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6247
6248 unlock:
6249 hci_dev_unlock(hdev);
6250 return err;
6251 }
6252
6253 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6254 u16 len)
6255 {
6256 struct mgmt_cp_confirm_name *cp = data;
6257 struct inquiry_entry *e;
6258 int err;
6259
6260 bt_dev_dbg(hdev, "sock %p", sk);
6261
6262 hci_dev_lock(hdev);
6263
6264 if (!hci_discovery_active(hdev)) {
6265 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6266 MGMT_STATUS_FAILED, &cp->addr,
6267 sizeof(cp->addr));
6268 goto failed;
6269 }
6270
6271 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6272 if (!e) {
6273 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6274 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6275 sizeof(cp->addr));
6276 goto failed;
6277 }
6278
6279 if (cp->name_known) {
6280 e->name_state = NAME_KNOWN;
6281 list_del(&e->list);
6282 } else {
6283 e->name_state = NAME_NEEDED;
6284 hci_inquiry_cache_update_resolve(hdev, e);
6285 }
6286
6287 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6288 &cp->addr, sizeof(cp->addr));
6289
6290 failed:
6291 hci_dev_unlock(hdev);
6292 return err;
6293 }
6294
6295 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6296 u16 len)
6297 {
6298 struct mgmt_cp_block_device *cp = data;
6299 u8 status;
6300 int err;
6301
6302 bt_dev_dbg(hdev, "sock %p", sk);
6303
6304 if (!bdaddr_type_is_valid(cp->addr.type))
6305 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6306 MGMT_STATUS_INVALID_PARAMS,
6307 &cp->addr, sizeof(cp->addr));
6308
6309 hci_dev_lock(hdev);
6310
6311 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6312 cp->addr.type);
6313 if (err < 0) {
6314 status = MGMT_STATUS_FAILED;
6315 goto done;
6316 }
6317
6318 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6319 sk);
6320 status = MGMT_STATUS_SUCCESS;
6321
6322 done:
6323 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6324 &cp->addr, sizeof(cp->addr));
6325
6326 hci_dev_unlock(hdev);
6327
6328 return err;
6329 }
6330
6331 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6332 u16 len)
6333 {
6334 struct mgmt_cp_unblock_device *cp = data;
6335 u8 status;
6336 int err;
6337
6338 bt_dev_dbg(hdev, "sock %p", sk);
6339
6340 if (!bdaddr_type_is_valid(cp->addr.type))
6341 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6342 MGMT_STATUS_INVALID_PARAMS,
6343 &cp->addr, sizeof(cp->addr));
6344
6345 hci_dev_lock(hdev);
6346
6347 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6348 cp->addr.type);
6349 if (err < 0) {
6350 status = MGMT_STATUS_INVALID_PARAMS;
6351 goto done;
6352 }
6353
6354 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6355 sk);
6356 status = MGMT_STATUS_SUCCESS;
6357
6358 done:
6359 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6360 &cp->addr, sizeof(cp->addr));
6361
6362 hci_dev_unlock(hdev);
6363
6364 return err;
6365 }
6366
6367 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6368 {
6369 return hci_update_eir_sync(hdev);
6370 }
6371
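/* Per the mgmt API, the Device ID source selects who assigned the vendor ID:
 * 0x0000 disables the Device ID record, 0x0001 is a Bluetooth SIG assigned
 * ID and 0x0002 a USB Implementer's Forum assigned one, hence the check for
 * values above 0x0002 below.
 */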
6372 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6373 u16 len)
6374 {
6375 struct mgmt_cp_set_device_id *cp = data;
6376 int err;
6377 __u16 source;
6378
6379 bt_dev_dbg(hdev, "sock %p", sk);
6380
6381 source = __le16_to_cpu(cp->source);
6382
6383 if (source > 0x0002)
6384 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6385 MGMT_STATUS_INVALID_PARAMS);
6386
6387 hci_dev_lock(hdev);
6388
6389 hdev->devid_source = source;
6390 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6391 hdev->devid_product = __le16_to_cpu(cp->product);
6392 hdev->devid_version = __le16_to_cpu(cp->version);
6393
6394 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6395 NULL, 0);
6396
6397 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6398
6399 hci_dev_unlock(hdev);
6400
6401 return err;
6402 }
6403
6404 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6405 {
6406 if (err)
6407 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6408 else
6409 bt_dev_dbg(hdev, "status %d", err);
6410 }
6411
6412 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6413 {
6414 struct mgmt_pending_cmd *cmd = data;
6415 struct cmd_lookup match = { NULL, hdev };
6416 u8 instance;
6417 struct adv_info *adv_instance;
6418 u8 status = mgmt_status(err);
6419
6420 if (err == -ECANCELED || !mgmt_pending_valid(hdev, data))
6421 return;
6422
6423 if (status) {
6424 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, status);
6425 mgmt_pending_free(cmd);
6426 return;
6427 }
6428
6429 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6430 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6431 else
6432 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6433
6434 settings_rsp(cmd, &match);
6435
6436 new_settings(hdev, match.sk);
6437
6438 if (match.sk)
6439 sock_put(match.sk);
6440
6441 /* If "Set Advertising" was just disabled and instance advertising was
6442 * set up earlier, then re-enable multi-instance advertising.
6443 */
6444 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6445 list_empty(&hdev->adv_instances))
6446 return;
6447
6448 instance = hdev->cur_adv_instance;
6449 if (!instance) {
6450 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6451 struct adv_info, list);
6452 if (!adv_instance)
6453 return;
6454
6455 instance = adv_instance->instance;
6456 }
6457
6458 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6459
6460 enable_advertising_instance(hdev, err);
6461 }
6462
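/* For Set Advertising, val 0x01 enables advertising and val 0x02 enables it
 * in connectable mode; the connectable flag is applied before advertising is
 * (re)started below.
 */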
6463 static int set_adv_sync(struct hci_dev *hdev, void *data)
6464 {
6465 struct mgmt_pending_cmd *cmd = data;
6466 struct mgmt_mode cp;
6467 u8 val;
6468
6469 mutex_lock(&hdev->mgmt_pending_lock);
6470
6471 if (!__mgmt_pending_listed(hdev, cmd)) {
6472 mutex_unlock(&hdev->mgmt_pending_lock);
6473 return -ECANCELED;
6474 }
6475
6476 memcpy(&cp, cmd->param, sizeof(cp));
6477
6478 mutex_unlock(&hdev->mgmt_pending_lock);
6479
6480 val = !!cp.val;
6481
6482 if (cp.val == 0x02)
6483 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6484 else
6485 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6486
6487 cancel_adv_timeout(hdev);
6488
6489 if (val) {
6490 /* Switch to instance "0" for the Set Advertising setting.
6491 * We cannot use update_[adv|scan_rsp]_data() here as the
6492 * HCI_ADVERTISING flag is not yet set.
6493 */
6494 hdev->cur_adv_instance = 0x00;
6495
6496 if (ext_adv_capable(hdev)) {
6497 hci_start_ext_adv_sync(hdev, 0x00);
6498 } else {
6499 hci_update_adv_data_sync(hdev, 0x00);
6500 hci_update_scan_rsp_data_sync(hdev, 0x00);
6501 hci_enable_advertising_sync(hdev);
6502 }
6503 } else {
6504 hci_disable_advertising_sync(hdev);
6505 }
6506
6507 return 0;
6508 }
6509
6510 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6511 u16 len)
6512 {
6513 struct mgmt_mode *cp = data;
6514 struct mgmt_pending_cmd *cmd;
6515 u8 val, status;
6516 int err;
6517
6518 bt_dev_dbg(hdev, "sock %p", sk);
6519
6520 status = mgmt_le_support(hdev);
6521 if (status)
6522 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6523 status);
6524
6525 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6526 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6527 MGMT_STATUS_INVALID_PARAMS);
6528
6529 if (hdev->advertising_paused)
6530 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6531 MGMT_STATUS_BUSY);
6532
6533 hci_dev_lock(hdev);
6534
6535 val = !!cp->val;
6536
6537 /* The following conditions are ones which mean that we should
6538 * not do any HCI communication but directly send a mgmt
6539 * response to user space (after toggling the flag if
6540 * necessary).
6541 */
6542 if (!hdev_is_powered(hdev) ||
6543 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6544 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6545 hci_dev_test_flag(hdev, HCI_MESH) ||
6546 hci_conn_num(hdev, LE_LINK) > 0 ||
6547 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6548 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6549 bool changed;
6550
6551 if (cp->val) {
6552 hdev->cur_adv_instance = 0x00;
6553 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6554 if (cp->val == 0x02)
6555 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6556 else
6557 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6558 } else {
6559 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6560 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6561 }
6562
6563 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6564 if (err < 0)
6565 goto unlock;
6566
6567 if (changed)
6568 err = new_settings(hdev, sk);
6569
6570 goto unlock;
6571 }
6572
6573 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6574 pending_find(MGMT_OP_SET_LE, hdev)) {
6575 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6576 MGMT_STATUS_BUSY);
6577 goto unlock;
6578 }
6579
6580 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6581 if (!cmd)
6582 err = -ENOMEM;
6583 else
6584 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6585 set_advertising_complete);
6586
6587 if (err < 0 && cmd)
6588 mgmt_pending_remove(cmd);
6589
6590 unlock:
6591 hci_dev_unlock(hdev);
6592 return err;
6593 }
6594
6595 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6596 void *data, u16 len)
6597 {
6598 struct mgmt_cp_set_static_address *cp = data;
6599 int err;
6600
6601 bt_dev_dbg(hdev, "sock %p", sk);
6602
6603 if (!lmp_le_capable(hdev))
6604 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6605 MGMT_STATUS_NOT_SUPPORTED);
6606
6607 if (hdev_is_powered(hdev))
6608 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6609 MGMT_STATUS_REJECTED);
6610
6611 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6612 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6613 return mgmt_cmd_status(sk, hdev->id,
6614 MGMT_OP_SET_STATIC_ADDRESS,
6615 MGMT_STATUS_INVALID_PARAMS);
6616
6617 /* Two most significant bits shall be set */
6618 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6619 return mgmt_cmd_status(sk, hdev->id,
6620 MGMT_OP_SET_STATIC_ADDRESS,
6621 MGMT_STATUS_INVALID_PARAMS);
6622 }
6623
6624 hci_dev_lock(hdev);
6625
6626 bacpy(&hdev->static_addr, &cp->bdaddr);
6627
6628 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6629 if (err < 0)
6630 goto unlock;
6631
6632 err = new_settings(hdev, sk);
6633
6634 unlock:
6635 hci_dev_unlock(hdev);
6636 return err;
6637 }
6638
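/* Scan interval and window are expressed in 0.625 ms slots, so the accepted
 * range of 0x0004-0x4000 corresponds to 2.5 ms - 10.24 s. As a hypothetical
 * example, interval = 0x0010 (10 ms) with window = 0x0008 (5 ms) would scan
 * half of the time, i.e. a 50% duty cycle.
 */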
6639 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6640 void *data, u16 len)
6641 {
6642 struct mgmt_cp_set_scan_params *cp = data;
6643 __u16 interval, window;
6644 int err;
6645
6646 bt_dev_dbg(hdev, "sock %p", sk);
6647
6648 if (!lmp_le_capable(hdev))
6649 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6650 MGMT_STATUS_NOT_SUPPORTED);
6651
6652 /* Keep allowed ranges in sync with set_mesh() */
6653 interval = __le16_to_cpu(cp->interval);
6654
6655 if (interval < 0x0004 || interval > 0x4000)
6656 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6657 MGMT_STATUS_INVALID_PARAMS);
6658
6659 window = __le16_to_cpu(cp->window);
6660
6661 if (window < 0x0004 || window > 0x4000)
6662 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6663 MGMT_STATUS_INVALID_PARAMS);
6664
6665 if (window > interval)
6666 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6667 MGMT_STATUS_INVALID_PARAMS);
6668
6669 hci_dev_lock(hdev);
6670
6671 hdev->le_scan_interval = interval;
6672 hdev->le_scan_window = window;
6673
6674 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6675 NULL, 0);
6676
6677 /* If background scan is running, restart it so new parameters are
6678 * loaded.
6679 */
6680 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6681 hdev->discovery.state == DISCOVERY_STOPPED)
6682 hci_update_passive_scan(hdev);
6683
6684 hci_dev_unlock(hdev);
6685
6686 return err;
6687 }
6688
6689 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6690 {
6691 struct mgmt_pending_cmd *cmd = data;
6692
6693 bt_dev_dbg(hdev, "err %d", err);
6694
6695 if (err) {
6696 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6697 mgmt_status(err));
6698 } else {
6699 struct mgmt_mode *cp = cmd->param;
6700
6701 if (cp->val)
6702 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6703 else
6704 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6705
6706 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6707 new_settings(hdev, cmd->sk);
6708 }
6709
6710 mgmt_pending_free(cmd);
6711 }
6712
6713 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6714 {
6715 struct mgmt_pending_cmd *cmd = data;
6716 struct mgmt_mode *cp = cmd->param;
6717
6718 return hci_write_fast_connectable_sync(hdev, cp->val);
6719 }
6720
6721 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6722 void *data, u16 len)
6723 {
6724 struct mgmt_mode *cp = data;
6725 struct mgmt_pending_cmd *cmd;
6726 int err;
6727
6728 bt_dev_dbg(hdev, "sock %p", sk);
6729
6730 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6731 hdev->hci_ver < BLUETOOTH_VER_1_2)
6732 return mgmt_cmd_status(sk, hdev->id,
6733 MGMT_OP_SET_FAST_CONNECTABLE,
6734 MGMT_STATUS_NOT_SUPPORTED);
6735
6736 if (cp->val != 0x00 && cp->val != 0x01)
6737 return mgmt_cmd_status(sk, hdev->id,
6738 MGMT_OP_SET_FAST_CONNECTABLE,
6739 MGMT_STATUS_INVALID_PARAMS);
6740
6741 hci_dev_lock(hdev);
6742
6743 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6744 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6745 goto unlock;
6746 }
6747
6748 if (!hdev_is_powered(hdev)) {
6749 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6750 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6751 new_settings(hdev, sk);
6752 goto unlock;
6753 }
6754
6755 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6756 len);
6757 if (!cmd)
6758 err = -ENOMEM;
6759 else
6760 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6761 fast_connectable_complete);
6762
6763 if (err < 0) {
6764 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6765 MGMT_STATUS_FAILED);
6766
6767 if (cmd)
6768 mgmt_pending_free(cmd);
6769 }
6770
6771 unlock:
6772 hci_dev_unlock(hdev);
6773
6774 return err;
6775 }
6776
6777 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6778 {
6779 struct mgmt_pending_cmd *cmd = data;
6780
6781 bt_dev_dbg(hdev, "err %d", err);
6782
6783 if (err) {
6784 u8 mgmt_err = mgmt_status(err);
6785
6786 /* We need to restore the flag if related HCI commands
6787 * failed.
6788 */
6789 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6790
6791 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
6792 } else {
6793 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6794 new_settings(hdev, cmd->sk);
6795 }
6796
6797 mgmt_pending_free(cmd);
6798 }
6799
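/* Toggling BR/EDR requires refreshing fast connectable mode, the scan state
 * and the advertising data flags; this sync helper chains the three updates
 * and stops on the first failure.
 */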
6800 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6801 {
6802 int status;
6803
6804 status = hci_write_fast_connectable_sync(hdev, false);
6805
6806 if (!status)
6807 status = hci_update_scan_sync(hdev);
6808
6809 /* Since only the advertising data flags will change, there
6810 * is no need to update the scan response data.
6811 */
6812 if (!status)
6813 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6814
6815 return status;
6816 }
6817
6818 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6819 {
6820 struct mgmt_mode *cp = data;
6821 struct mgmt_pending_cmd *cmd;
6822 int err;
6823
6824 bt_dev_dbg(hdev, "sock %p", sk);
6825
6826 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6827 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6828 MGMT_STATUS_NOT_SUPPORTED);
6829
6830 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6831 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6832 MGMT_STATUS_REJECTED);
6833
6834 if (cp->val != 0x00 && cp->val != 0x01)
6835 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6836 MGMT_STATUS_INVALID_PARAMS);
6837
6838 hci_dev_lock(hdev);
6839
6840 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6841 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6842 goto unlock;
6843 }
6844
6845 if (!hdev_is_powered(hdev)) {
6846 if (!cp->val) {
6847 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6848 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6849 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6850 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6851 }
6852
6853 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6854
6855 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6856 if (err < 0)
6857 goto unlock;
6858
6859 err = new_settings(hdev, sk);
6860 goto unlock;
6861 }
6862
6863 /* Reject disabling when powered on */
6864 if (!cp->val) {
6865 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6866 MGMT_STATUS_REJECTED);
6867 goto unlock;
6868 } else {
6869 /* When configuring a dual-mode controller to operate
6870 * with LE only and using a static address, then switching
6871 * BR/EDR back on is not allowed.
6872 *
6873 * Dual-mode controllers shall operate with the public
6874 * address as its identity address for BR/EDR and LE. So
6875 * reject the attempt to create an invalid configuration.
6876 *
6877 * The same restriction applies when secure connections
6878 * has been enabled. For BR/EDR this is a controller feature
6879 * while for LE it is a host stack feature. This means that
6880 * switching BR/EDR back on when secure connections has been
6881 * enabled is not a supported transaction.
6882 */
6883 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6884 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6885 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6886 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6887 MGMT_STATUS_REJECTED);
6888 goto unlock;
6889 }
6890 }
6891
6892 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6893 if (!cmd)
6894 err = -ENOMEM;
6895 else
6896 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6897 set_bredr_complete);
6898
6899 if (err < 0) {
6900 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6901 MGMT_STATUS_FAILED);
6902 if (cmd)
6903 mgmt_pending_free(cmd);
6904
6905 goto unlock;
6906 }
6907
6908 /* We need to flip the bit already here so that
6909 * hci_req_update_adv_data generates the correct flags.
6910 */
6911 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6912
6913 unlock:
6914 hci_dev_unlock(hdev);
6915 return err;
6916 }
6917
6918 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6919 {
6920 struct mgmt_pending_cmd *cmd = data;
6921 struct mgmt_mode *cp;
6922
6923 bt_dev_dbg(hdev, "err %d", err);
6924
6925 if (err) {
6926 u8 mgmt_err = mgmt_status(err);
6927
6928 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
6929 goto done;
6930 }
6931
6932 cp = cmd->param;
6933
6934 switch (cp->val) {
6935 case 0x00:
6936 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6937 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6938 break;
6939 case 0x01:
6940 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6941 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6942 break;
6943 case 0x02:
6944 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6945 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6946 break;
6947 }
6948
6949 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6950 new_settings(hdev, cmd->sk);
6951
6952 done:
6953 mgmt_pending_free(cmd);
6954 }
6955
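/* Secure Connections mode values: 0x00 disables SC, 0x01 enables it and
 * 0x02 enables SC-only mode (see the flag handling in the completion
 * handler above).
 */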
6956 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6957 {
6958 struct mgmt_pending_cmd *cmd = data;
6959 struct mgmt_mode *cp = cmd->param;
6960 u8 val = !!cp->val;
6961
6962 /* Force write of val */
6963 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6964
6965 return hci_write_sc_support_sync(hdev, val);
6966 }
6967
6968 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6969 void *data, u16 len)
6970 {
6971 struct mgmt_mode *cp = data;
6972 struct mgmt_pending_cmd *cmd;
6973 u8 val;
6974 int err;
6975
6976 bt_dev_dbg(hdev, "sock %p", sk);
6977
6978 if (!lmp_sc_capable(hdev) &&
6979 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6980 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6981 MGMT_STATUS_NOT_SUPPORTED);
6982
6983 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6984 lmp_sc_capable(hdev) &&
6985 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6986 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6987 MGMT_STATUS_REJECTED);
6988
6989 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6990 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6991 MGMT_STATUS_INVALID_PARAMS);
6992
6993 hci_dev_lock(hdev);
6994
6995 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6996 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6997 bool changed;
6998
6999 if (cp->val) {
7000 changed = !hci_dev_test_and_set_flag(hdev,
7001 HCI_SC_ENABLED);
7002 if (cp->val == 0x02)
7003 hci_dev_set_flag(hdev, HCI_SC_ONLY);
7004 else
7005 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
7006 } else {
7007 changed = hci_dev_test_and_clear_flag(hdev,
7008 HCI_SC_ENABLED);
7009 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
7010 }
7011
7012 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
7013 if (err < 0)
7014 goto failed;
7015
7016 if (changed)
7017 err = new_settings(hdev, sk);
7018
7019 goto failed;
7020 }
7021
7022 val = !!cp->val;
7023
7024 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
7025 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
7026 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
7027 goto failed;
7028 }
7029
7030 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
7031 if (!cmd)
7032 err = -ENOMEM;
7033 else
7034 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
7035 set_secure_conn_complete);
7036
7037 if (err < 0) {
7038 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
7039 MGMT_STATUS_FAILED);
7040 if (cmd)
7041 mgmt_pending_free(cmd);
7042 }
7043
7044 failed:
7045 hci_dev_unlock(hdev);
7046 return err;
7047 }
7048
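/* Set Debug Keys values: 0x00 discards debug keys, 0x01 keeps them in the
 * key store, and 0x02 additionally puts the controller into SSP debug mode
 * so that new pairings use the debug key.
 */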
7049 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
7050 void *data, u16 len)
7051 {
7052 struct mgmt_mode *cp = data;
7053 bool changed, use_changed;
7054 int err;
7055
7056 bt_dev_dbg(hdev, "sock %p", sk);
7057
7058 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
7059 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
7060 MGMT_STATUS_INVALID_PARAMS);
7061
7062 hci_dev_lock(hdev);
7063
7064 if (cp->val)
7065 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
7066 else
7067 changed = hci_dev_test_and_clear_flag(hdev,
7068 HCI_KEEP_DEBUG_KEYS);
7069
7070 if (cp->val == 0x02)
7071 use_changed = !hci_dev_test_and_set_flag(hdev,
7072 HCI_USE_DEBUG_KEYS);
7073 else
7074 use_changed = hci_dev_test_and_clear_flag(hdev,
7075 HCI_USE_DEBUG_KEYS);
7076
7077 if (hdev_is_powered(hdev) && use_changed &&
7078 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7079 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
7080 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
7081 sizeof(mode), &mode);
7082 }
7083
7084 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
7085 if (err < 0)
7086 goto unlock;
7087
7088 if (changed)
7089 err = new_settings(hdev, sk);
7090
7091 unlock:
7092 hci_dev_unlock(hdev);
7093 return err;
7094 }
7095
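/* Set Privacy values: 0x00 disables privacy, 0x01 enables it with the given
 * IRK, and 0x02 additionally sets HCI_LIMITED_PRIVACY. The command is only
 * accepted while the controller is powered off.
 */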
7096 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7097 u16 len)
7098 {
7099 struct mgmt_cp_set_privacy *cp = cp_data;
7100 bool changed;
7101 int err;
7102
7103 bt_dev_dbg(hdev, "sock %p", sk);
7104
7105 if (!lmp_le_capable(hdev))
7106 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7107 MGMT_STATUS_NOT_SUPPORTED);
7108
7109 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
7110 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7111 MGMT_STATUS_INVALID_PARAMS);
7112
7113 if (hdev_is_powered(hdev))
7114 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7115 MGMT_STATUS_REJECTED);
7116
7117 hci_dev_lock(hdev);
7118
7119 /* If user space supports this command it is also expected to
7120 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
7121 */
7122 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7123
7124 if (cp->privacy) {
7125 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
7126 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
7127 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
7128 hci_adv_instances_set_rpa_expired(hdev, true);
7129 if (cp->privacy == 0x02)
7130 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
7131 else
7132 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7133 } else {
7134 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7135 memset(hdev->irk, 0, sizeof(hdev->irk));
7136 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7137 hci_adv_instances_set_rpa_expired(hdev, false);
7138 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7139 }
7140
7141 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7142 if (err < 0)
7143 goto unlock;
7144
7145 if (changed)
7146 err = new_settings(hdev, sk);
7147
7148 unlock:
7149 hci_dev_unlock(hdev);
7150 return err;
7151 }
7152
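/* An IRK may be tied to a public address or to a static random address; for
 * the latter the two most significant bits of the address must be set, as
 * required for static random addresses.
 */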
7153 static bool irk_is_valid(struct mgmt_irk_info *irk)
7154 {
7155 switch (irk->addr.type) {
7156 case BDADDR_LE_PUBLIC:
7157 return true;
7158
7159 case BDADDR_LE_RANDOM:
7160 /* Two most significant bits shall be set */
7161 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7162 return false;
7163 return true;
7164 }
7165
7166 return false;
7167 }
7168
7169 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7170 u16 len)
7171 {
7172 struct mgmt_cp_load_irks *cp = cp_data;
7173 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7174 sizeof(struct mgmt_irk_info));
7175 u16 irk_count, expected_len;
7176 int i, err;
7177
7178 bt_dev_dbg(hdev, "sock %p", sk);
7179
7180 if (!lmp_le_capable(hdev))
7181 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7182 MGMT_STATUS_NOT_SUPPORTED);
7183
7184 irk_count = __le16_to_cpu(cp->irk_count);
7185 if (irk_count > max_irk_count) {
7186 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7187 irk_count);
7188 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7189 MGMT_STATUS_INVALID_PARAMS);
7190 }
7191
7192 expected_len = struct_size(cp, irks, irk_count);
7193 if (expected_len != len) {
7194 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7195 expected_len, len);
7196 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7197 MGMT_STATUS_INVALID_PARAMS);
7198 }
7199
7200 bt_dev_dbg(hdev, "irk_count %u", irk_count);
7201
7202 for (i = 0; i < irk_count; i++) {
7203 struct mgmt_irk_info *key = &cp->irks[i];
7204
7205 if (!irk_is_valid(key))
7206 return mgmt_cmd_status(sk, hdev->id,
7207 MGMT_OP_LOAD_IRKS,
7208 MGMT_STATUS_INVALID_PARAMS);
7209 }
7210
7211 hci_dev_lock(hdev);
7212
7213 hci_smp_irks_clear(hdev);
7214
7215 for (i = 0; i < irk_count; i++) {
7216 struct mgmt_irk_info *irk = &cp->irks[i];
7217
7218 if (hci_is_blocked_key(hdev,
7219 HCI_BLOCKED_KEY_TYPE_IRK,
7220 irk->val)) {
7221 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7222 &irk->addr.bdaddr);
7223 continue;
7224 }
7225
7226 hci_add_irk(hdev, &irk->addr.bdaddr,
7227 le_addr_type(irk->addr.type), irk->val,
7228 BDADDR_ANY);
7229 }
7230
7231 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7232
7233 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7234
7235 hci_dev_unlock(hdev);
7236
7237 return err;
7238 }
7239
7240 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7241 {
7242 if (key->initiator != 0x00 && key->initiator != 0x01)
7243 return false;
7244
7245 switch (key->addr.type) {
7246 case BDADDR_LE_PUBLIC:
7247 return true;
7248
7249 case BDADDR_LE_RANDOM:
7250 /* Two most significant bits shall be set */
7251 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7252 return false;
7253 return true;
7254 }
7255
7256 return false;
7257 }
7258
7259 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7260 void *cp_data, u16 len)
7261 {
7262 struct mgmt_cp_load_long_term_keys *cp = cp_data;
7263 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7264 sizeof(struct mgmt_ltk_info));
7265 u16 key_count, expected_len;
7266 int i, err;
7267
7268 bt_dev_dbg(hdev, "sock %p", sk);
7269
7270 if (!lmp_le_capable(hdev))
7271 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7272 MGMT_STATUS_NOT_SUPPORTED);
7273
7274 key_count = __le16_to_cpu(cp->key_count);
7275 if (key_count > max_key_count) {
7276 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7277 key_count);
7278 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7279 MGMT_STATUS_INVALID_PARAMS);
7280 }
7281
7282 expected_len = struct_size(cp, keys, key_count);
7283 if (expected_len != len) {
7284 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7285 expected_len, len);
7286 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7287 MGMT_STATUS_INVALID_PARAMS);
7288 }
7289
7290 bt_dev_dbg(hdev, "key_count %u", key_count);
7291
7292 hci_dev_lock(hdev);
7293
7294 hci_smp_ltks_clear(hdev);
7295
7296 for (i = 0; i < key_count; i++) {
7297 struct mgmt_ltk_info *key = &cp->keys[i];
7298 u8 type, authenticated;
7299
7300 if (hci_is_blocked_key(hdev,
7301 HCI_BLOCKED_KEY_TYPE_LTK,
7302 key->val)) {
7303 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7304 &key->addr.bdaddr);
7305 continue;
7306 }
7307
7308 if (!ltk_is_valid(key)) {
7309 bt_dev_warn(hdev, "Invalid LTK for %pMR",
7310 &key->addr.bdaddr);
7311 continue;
7312 }
7313
7314 switch (key->type) {
7315 case MGMT_LTK_UNAUTHENTICATED:
7316 authenticated = 0x00;
7317 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7318 break;
7319 case MGMT_LTK_AUTHENTICATED:
7320 authenticated = 0x01;
7321 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7322 break;
7323 case MGMT_LTK_P256_UNAUTH:
7324 authenticated = 0x00;
7325 type = SMP_LTK_P256;
7326 break;
7327 case MGMT_LTK_P256_AUTH:
7328 authenticated = 0x01;
7329 type = SMP_LTK_P256;
7330 break;
7331 case MGMT_LTK_P256_DEBUG:
7332 authenticated = 0x00;
7333 type = SMP_LTK_P256_DEBUG;
7334 fallthrough;
7335 default:
7336 continue;
7337 }
7338
7339 hci_add_ltk(hdev, &key->addr.bdaddr,
7340 le_addr_type(key->addr.type), type, authenticated,
7341 key->val, key->enc_size, key->ediv, key->rand);
7342 }
7343
7344 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7345 NULL, 0);
7346
7347 hci_dev_unlock(hdev);
7348
7349 return err;
7350 }
7351
7352 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7353 {
7354 struct mgmt_pending_cmd *cmd = data;
7355 struct hci_conn *conn = cmd->user_data;
7356 struct mgmt_cp_get_conn_info *cp = cmd->param;
7357 struct mgmt_rp_get_conn_info rp;
7358 u8 status;
7359
7360 bt_dev_dbg(hdev, "err %d", err);
7361
7362 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7363
7364 status = mgmt_status(err);
7365 if (status == MGMT_STATUS_SUCCESS) {
7366 rp.rssi = conn->rssi;
7367 rp.tx_power = conn->tx_power;
7368 rp.max_tx_power = conn->max_tx_power;
7369 } else {
7370 rp.rssi = HCI_RSSI_INVALID;
7371 rp.tx_power = HCI_TX_POWER_INVALID;
7372 rp.max_tx_power = HCI_TX_POWER_INVALID;
7373 }
7374
7375 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_GET_CONN_INFO, status,
7376 &rp, sizeof(rp));
7377
7378 mgmt_pending_free(cmd);
7379 }
7380
7381 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7382 {
7383 struct mgmt_pending_cmd *cmd = data;
7384 struct mgmt_cp_get_conn_info *cp = cmd->param;
7385 struct hci_conn *conn;
7386 int err;
7387 __le16 handle;
7388
7389 /* Make sure we are still connected */
7390 if (cp->addr.type == BDADDR_BREDR)
7391 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7392 &cp->addr.bdaddr);
7393 else
7394 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7395
7396 if (!conn || conn->state != BT_CONNECTED)
7397 return MGMT_STATUS_NOT_CONNECTED;
7398
7399 cmd->user_data = conn;
7400 handle = cpu_to_le16(conn->handle);
7401
7402 /* Refresh RSSI each time */
7403 err = hci_read_rssi_sync(hdev, handle);
7404
7405 /* For LE links the TX power does not change, thus we don't need to
7406 * query for it again once its value is known.
7407 */
7408 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7409 conn->tx_power == HCI_TX_POWER_INVALID))
7410 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7411
7412 /* Max TX power needs to be read only once per connection */
7413 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7414 err = hci_read_tx_power_sync(hdev, handle, 0x01);
7415
7416 return err;
7417 }
7418
7419 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7420 u16 len)
7421 {
7422 struct mgmt_cp_get_conn_info *cp = data;
7423 struct mgmt_rp_get_conn_info rp;
7424 struct hci_conn *conn;
7425 unsigned long conn_info_age;
7426 int err = 0;
7427
7428 bt_dev_dbg(hdev, "sock %p", sk);
7429
7430 memset(&rp, 0, sizeof(rp));
7431 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7432 rp.addr.type = cp->addr.type;
7433
7434 if (!bdaddr_type_is_valid(cp->addr.type))
7435 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7436 MGMT_STATUS_INVALID_PARAMS,
7437 &rp, sizeof(rp));
7438
7439 hci_dev_lock(hdev);
7440
7441 if (!hdev_is_powered(hdev)) {
7442 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7443 MGMT_STATUS_NOT_POWERED, &rp,
7444 sizeof(rp));
7445 goto unlock;
7446 }
7447
7448 if (cp->addr.type == BDADDR_BREDR)
7449 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7450 &cp->addr.bdaddr);
7451 else
7452 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7453
7454 if (!conn || conn->state != BT_CONNECTED) {
7455 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7456 MGMT_STATUS_NOT_CONNECTED, &rp,
7457 sizeof(rp));
7458 goto unlock;
7459 }
7460
7461 /* To stop the client from guessing when to poll again, we calculate the
7462 * conn info age as a random value between the min/max set in hdev.
7463 */
7464 conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
7465 hdev->conn_info_max_age - 1);
7466
7467 /* Query controller to refresh cached values if they are too old or were
7468 * never read.
7469 */
7470 if (time_after(jiffies, conn->conn_info_timestamp +
7471 msecs_to_jiffies(conn_info_age)) ||
7472 !conn->conn_info_timestamp) {
7473 struct mgmt_pending_cmd *cmd;
7474
7475 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7476 len);
7477 if (!cmd) {
7478 err = -ENOMEM;
7479 } else {
7480 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7481 cmd, get_conn_info_complete);
7482 }
7483
7484 if (err < 0) {
7485 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7486 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7487
7488 if (cmd)
7489 mgmt_pending_free(cmd);
7490
7491 goto unlock;
7492 }
7493
7494 conn->conn_info_timestamp = jiffies;
7495 } else {
7496 /* Cache is valid, just reply with values cached in hci_conn */
7497 rp.rssi = conn->rssi;
7498 rp.tx_power = conn->tx_power;
7499 rp.max_tx_power = conn->max_tx_power;
7500
7501 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7502 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7503 }
7504
7505 unlock:
7506 hci_dev_unlock(hdev);
7507 return err;
7508 }
7509
7510 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7511 {
7512 struct mgmt_pending_cmd *cmd = data;
7513 struct mgmt_cp_get_clock_info *cp = cmd->param;
7514 struct mgmt_rp_get_clock_info rp;
7515 struct hci_conn *conn = cmd->user_data;
7516 u8 status = mgmt_status(err);
7517
7518 bt_dev_dbg(hdev, "err %d", err);
7519
7520 memset(&rp, 0, sizeof(rp));
7521 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7522 rp.addr.type = cp->addr.type;
7523
7524 if (err)
7525 goto complete;
7526
7527 rp.local_clock = cpu_to_le32(hdev->clock);
7528
7529 if (conn) {
7530 rp.piconet_clock = cpu_to_le32(conn->clock);
7531 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7532 }
7533
7534 complete:
7535 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status, &rp,
7536 sizeof(rp));
7537
7538 mgmt_pending_free(cmd);
7539 }
7540
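/* Read the local clock first (the zeroed parameters select it), then, if
 * the connection still exists, the piconet clock for that connection.
 */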
7541 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7542 {
7543 struct mgmt_pending_cmd *cmd = data;
7544 struct mgmt_cp_get_clock_info *cp = cmd->param;
7545 struct hci_cp_read_clock hci_cp;
7546 struct hci_conn *conn;
7547
7548 memset(&hci_cp, 0, sizeof(hci_cp));
7549 hci_read_clock_sync(hdev, &hci_cp);
7550
7551 /* Make sure connection still exists */
7552 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7553 if (!conn || conn->state != BT_CONNECTED)
7554 return MGMT_STATUS_NOT_CONNECTED;
7555
7556 cmd->user_data = conn;
7557 hci_cp.handle = cpu_to_le16(conn->handle);
7558 hci_cp.which = 0x01; /* Piconet clock */
7559
7560 return hci_read_clock_sync(hdev, &hci_cp);
7561 }
7562
7563 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7564 u16 len)
7565 {
7566 struct mgmt_cp_get_clock_info *cp = data;
7567 struct mgmt_rp_get_clock_info rp;
7568 struct mgmt_pending_cmd *cmd;
7569 struct hci_conn *conn;
7570 int err;
7571
7572 bt_dev_dbg(hdev, "sock %p", sk);
7573
7574 memset(&rp, 0, sizeof(rp));
7575 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7576 rp.addr.type = cp->addr.type;
7577
7578 if (cp->addr.type != BDADDR_BREDR)
7579 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7580 MGMT_STATUS_INVALID_PARAMS,
7581 &rp, sizeof(rp));
7582
7583 hci_dev_lock(hdev);
7584
7585 if (!hdev_is_powered(hdev)) {
7586 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7587 MGMT_STATUS_NOT_POWERED, &rp,
7588 sizeof(rp));
7589 goto unlock;
7590 }
7591
7592 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7593 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7594 &cp->addr.bdaddr);
7595 if (!conn || conn->state != BT_CONNECTED) {
7596 err = mgmt_cmd_complete(sk, hdev->id,
7597 MGMT_OP_GET_CLOCK_INFO,
7598 MGMT_STATUS_NOT_CONNECTED,
7599 &rp, sizeof(rp));
7600 goto unlock;
7601 }
7602 } else {
7603 conn = NULL;
7604 }
7605
7606 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7607 if (!cmd)
7608 err = -ENOMEM;
7609 else
7610 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7611 get_clock_info_complete);
7612
7613 if (err < 0) {
7614 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7615 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7616
7617 if (cmd)
7618 mgmt_pending_free(cmd);
7619 }
7620
7621
7622 unlock:
7623 hci_dev_unlock(hdev);
7624 return err;
7625 }
7626
7627 static void device_added(struct sock *sk, struct hci_dev *hdev,
7628 bdaddr_t *bdaddr, u8 type, u8 action)
7629 {
7630 struct mgmt_ev_device_added ev;
7631
7632 bacpy(&ev.addr.bdaddr, bdaddr);
7633 ev.addr.type = type;
7634 ev.action = action;
7635
7636 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7637 }
7638
7639 static void add_device_complete(struct hci_dev *hdev, void *data, int err)
7640 {
7641 struct mgmt_pending_cmd *cmd = data;
7642 struct mgmt_cp_add_device *cp = cmd->param;
7643
7644 if (!err) {
7645 struct hci_conn_params *params;
7646
7647 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7648 le_addr_type(cp->addr.type));
7649
7650 device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
7651 cp->action);
7652 device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
7653 cp->addr.type, hdev->conn_flags,
7654 params ? params->flags : 0);
7655 }
7656
7657 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
7658 mgmt_status(err), &cp->addr, sizeof(cp->addr));
7659 mgmt_pending_free(cmd);
7660 }
7661
7662 static int add_device_sync(struct hci_dev *hdev, void *data)
7663 {
7664 return hci_update_passive_scan_sync(hdev);
7665 }
7666
7667 static int add_device(struct sock *sk, struct hci_dev *hdev,
7668 void *data, u16 len)
7669 {
7670 struct mgmt_pending_cmd *cmd;
7671 struct mgmt_cp_add_device *cp = data;
7672 u8 auto_conn, addr_type;
7673 struct hci_conn_params *params;
7674 int err;
7675 u32 current_flags = 0;
7676 u32 supported_flags;
7677
7678 bt_dev_dbg(hdev, "sock %p", sk);
7679
7680 if (!bdaddr_type_is_valid(cp->addr.type) ||
7681 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7682 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7683 MGMT_STATUS_INVALID_PARAMS,
7684 &cp->addr, sizeof(cp->addr));
7685
7686 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7687 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7688 MGMT_STATUS_INVALID_PARAMS,
7689 &cp->addr, sizeof(cp->addr));
7690
7691 hci_dev_lock(hdev);
7692
7693 if (cp->addr.type == BDADDR_BREDR) {
7694 /* Only the incoming connection action is supported for now */
7695 if (cp->action != 0x01) {
7696 err = mgmt_cmd_complete(sk, hdev->id,
7697 MGMT_OP_ADD_DEVICE,
7698 MGMT_STATUS_INVALID_PARAMS,
7699 &cp->addr, sizeof(cp->addr));
7700 goto unlock;
7701 }
7702
7703 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7704 &cp->addr.bdaddr,
7705 cp->addr.type, 0);
7706 if (err)
7707 goto unlock;
7708
7709 hci_update_scan(hdev);
7710
7711 goto added;
7712 }
7713
7714 addr_type = le_addr_type(cp->addr.type);
7715
7716 if (cp->action == 0x02)
7717 auto_conn = HCI_AUTO_CONN_ALWAYS;
7718 else if (cp->action == 0x01)
7719 auto_conn = HCI_AUTO_CONN_DIRECT;
7720 else
7721 auto_conn = HCI_AUTO_CONN_REPORT;
7722
7723 /* The kernel internally uses conn_params with the resolvable
7724 * private address, but Add Device allows only identity addresses.
7725 * Enforce this before calling
7726 * hci_conn_params_lookup.
7727 */
7728 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7729 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7730 MGMT_STATUS_INVALID_PARAMS,
7731 &cp->addr, sizeof(cp->addr));
7732 goto unlock;
7733 }
7734
7735 /* If the connection parameters don't exist for this device,
7736 * they will be created and configured with defaults.
7737 */
7738 params = hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7739 auto_conn);
7740 if (!params) {
7741 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7742 MGMT_STATUS_FAILED, &cp->addr,
7743 sizeof(cp->addr));
7744 goto unlock;
7745 }
7746
7747 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
7748 if (!cmd) {
7749 err = -ENOMEM;
7750 goto unlock;
7751 }
7752
7753 err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
7754 add_device_complete);
7755 if (err < 0) {
7756 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7757 MGMT_STATUS_FAILED, &cp->addr,
7758 sizeof(cp->addr));
7759 mgmt_pending_free(cmd);
7760 }
7761
7762 goto unlock;
7763
7764 added:
7765 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7766 supported_flags = hdev->conn_flags;
7767 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7768 supported_flags, current_flags);
7769
7770 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7771 MGMT_STATUS_SUCCESS, &cp->addr,
7772 sizeof(cp->addr));
7773
7774 unlock:
7775 hci_dev_unlock(hdev);
7776 return err;
7777 }
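
/* A minimal sketch of the parameters this handler consumes, following
 * the packed mgmt.h layout (shown for illustration only):
 *
 *	struct mgmt_cp_add_device cp;
 *
 *	bacpy(&cp.addr.bdaddr, &peer);	// identity address only
 *	cp.addr.type = BDADDR_LE_PUBLIC;
 *	cp.action = 0x02;		// maps to HCI_AUTO_CONN_ALWAYS
 *
 * Action 0x01 on a BR/EDR address instead adds the peer to the accept
 * list for incoming connections, as handled above.
 */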
7778
7779 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7780 bdaddr_t *bdaddr, u8 type)
7781 {
7782 struct mgmt_ev_device_removed ev;
7783
7784 bacpy(&ev.addr.bdaddr, bdaddr);
7785 ev.addr.type = type;
7786
7787 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7788 }
7789
7790 static int remove_device_sync(struct hci_dev *hdev, void *data)
7791 {
7792 return hci_update_passive_scan_sync(hdev);
7793 }
7794
7795 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7796 void *data, u16 len)
7797 {
7798 struct mgmt_cp_remove_device *cp = data;
7799 int err;
7800
7801 bt_dev_dbg(hdev, "sock %p", sk);
7802
7803 hci_dev_lock(hdev);
7804
7805 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7806 struct hci_conn_params *params;
7807 u8 addr_type;
7808
7809 if (!bdaddr_type_is_valid(cp->addr.type)) {
7810 err = mgmt_cmd_complete(sk, hdev->id,
7811 MGMT_OP_REMOVE_DEVICE,
7812 MGMT_STATUS_INVALID_PARAMS,
7813 &cp->addr, sizeof(cp->addr));
7814 goto unlock;
7815 }
7816
7817 if (cp->addr.type == BDADDR_BREDR) {
7818 err = hci_bdaddr_list_del(&hdev->accept_list,
7819 &cp->addr.bdaddr,
7820 cp->addr.type);
7821 if (err) {
7822 err = mgmt_cmd_complete(sk, hdev->id,
7823 MGMT_OP_REMOVE_DEVICE,
7824 MGMT_STATUS_INVALID_PARAMS,
7825 &cp->addr,
7826 sizeof(cp->addr));
7827 goto unlock;
7828 }
7829
7830 hci_update_scan(hdev);
7831
7832 device_removed(sk, hdev, &cp->addr.bdaddr,
7833 cp->addr.type);
7834 goto complete;
7835 }
7836
7837 addr_type = le_addr_type(cp->addr.type);
7838
7839 /* The kernel internally uses conn_params with the resolvable
7840 * private address, but Remove Device allows only identity addresses.
7841 * Enforce this before calling
7842 * hci_conn_params_lookup.
7843 */
7844 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7845 err = mgmt_cmd_complete(sk, hdev->id,
7846 MGMT_OP_REMOVE_DEVICE,
7847 MGMT_STATUS_INVALID_PARAMS,
7848 &cp->addr, sizeof(cp->addr));
7849 goto unlock;
7850 }
7851
7852 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7853 addr_type);
7854 if (!params) {
7855 err = mgmt_cmd_complete(sk, hdev->id,
7856 MGMT_OP_REMOVE_DEVICE,
7857 MGMT_STATUS_INVALID_PARAMS,
7858 &cp->addr, sizeof(cp->addr));
7859 goto unlock;
7860 }
7861
7862 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7863 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7864 err = mgmt_cmd_complete(sk, hdev->id,
7865 MGMT_OP_REMOVE_DEVICE,
7866 MGMT_STATUS_INVALID_PARAMS,
7867 &cp->addr, sizeof(cp->addr));
7868 goto unlock;
7869 }
7870
7871 hci_conn_params_free(params);
7872
7873 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7874 } else {
7875 struct hci_conn_params *p, *tmp;
7876 struct bdaddr_list *b, *btmp;
7877
7878 if (cp->addr.type) {
7879 err = mgmt_cmd_complete(sk, hdev->id,
7880 MGMT_OP_REMOVE_DEVICE,
7881 MGMT_STATUS_INVALID_PARAMS,
7882 &cp->addr, sizeof(cp->addr));
7883 goto unlock;
7884 }
7885
7886 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7887 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7888 list_del(&b->list);
7889 kfree(b);
7890 }
7891
7892 hci_update_scan(hdev);
7893
7894 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7895 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7896 continue;
7897 device_removed(sk, hdev, &p->addr, p->addr_type);
7898 if (p->explicit_connect) {
7899 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7900 continue;
7901 }
7902 hci_conn_params_free(p);
7903 }
7904
7905 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7906 }
7907
7908 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7909
7910 complete:
7911 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7912 MGMT_STATUS_SUCCESS, &cp->addr,
7913 sizeof(cp->addr));
7914 unlock:
7915 hci_dev_unlock(hdev);
7916 return err;
7917 }
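
/* As the else branch above implements, Remove Device with BDADDR_ANY
 * (and address type 0) acts as a wildcard: the whole BR/EDR accept
 * list is flushed and every LE connection parameter entry is freed,
 * except disabled entries (skipped) and explicit-connect entries,
 * which are downgraded to HCI_AUTO_CONN_EXPLICIT and kept.
 */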
7918
7919 static int conn_update_sync(struct hci_dev *hdev, void *data)
7920 {
7921 struct hci_conn_params *params = data;
7922 struct hci_conn *conn;
7923
7924 conn = hci_conn_hash_lookup_le(hdev, &params->addr, params->addr_type);
7925 if (!conn)
7926 return -ECANCELED;
7927
7928 return hci_le_conn_update_sync(hdev, conn, params);
7929 }
7930
7931 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7932 u16 len)
7933 {
7934 struct mgmt_cp_load_conn_param *cp = data;
7935 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7936 sizeof(struct mgmt_conn_param));
7937 u16 param_count, expected_len;
7938 int i;
7939
7940 if (!lmp_le_capable(hdev))
7941 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7942 MGMT_STATUS_NOT_SUPPORTED);
7943
7944 param_count = __le16_to_cpu(cp->param_count);
7945 if (param_count > max_param_count) {
7946 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7947 param_count);
7948 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7949 MGMT_STATUS_INVALID_PARAMS);
7950 }
7951
7952 expected_len = struct_size(cp, params, param_count);
7953 if (expected_len != len) {
7954 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7955 expected_len, len);
7956 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7957 MGMT_STATUS_INVALID_PARAMS);
7958 }
7959
7960 bt_dev_dbg(hdev, "param_count %u", param_count);
7961
7962 hci_dev_lock(hdev);
7963
7964 if (param_count > 1)
7965 hci_conn_params_clear_disabled(hdev);
7966
7967 for (i = 0; i < param_count; i++) {
7968 struct mgmt_conn_param *param = &cp->params[i];
7969 struct hci_conn_params *hci_param;
7970 u16 min, max, latency, timeout;
7971 bool update = false;
7972 u8 addr_type;
7973
7974 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
7975 param->addr.type);
7976
7977 if (param->addr.type == BDADDR_LE_PUBLIC) {
7978 addr_type = ADDR_LE_DEV_PUBLIC;
7979 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7980 addr_type = ADDR_LE_DEV_RANDOM;
7981 } else {
7982 bt_dev_err(hdev, "ignoring invalid connection parameters");
7983 continue;
7984 }
7985
7986 min = le16_to_cpu(param->min_interval);
7987 max = le16_to_cpu(param->max_interval);
7988 latency = le16_to_cpu(param->latency);
7989 timeout = le16_to_cpu(param->timeout);
7990
7991 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7992 min, max, latency, timeout);
7993
7994 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7995 bt_dev_err(hdev, "ignoring invalid connection parameters");
7996 continue;
7997 }
7998
7999 /* Detect when the load is for an existing parameter and, if so,
8000 * attempt to trigger the connection update procedure.
8001 */
8002 if (!i && param_count == 1) {
8003 hci_param = hci_conn_params_lookup(hdev,
8004 &param->addr.bdaddr,
8005 addr_type);
8006 if (hci_param)
8007 update = true;
8008 else
8009 hci_conn_params_clear_disabled(hdev);
8010 }
8011
8012 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
8013 addr_type);
8014 if (!hci_param) {
8015 bt_dev_err(hdev, "failed to add connection parameters");
8016 continue;
8017 }
8018
8019 hci_param->conn_min_interval = min;
8020 hci_param->conn_max_interval = max;
8021 hci_param->conn_latency = latency;
8022 hci_param->supervision_timeout = timeout;
8023
8024 /* Check if we need to trigger a connection update */
8025 if (update) {
8026 struct hci_conn *conn;
8027
8028 /* Look up an existing connection as central, check whether the
8029 * parameters match, and if they don't then trigger a connection
8030 * update.
8031 */
8032 conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
8033 addr_type);
8034 if (conn && conn->role == HCI_ROLE_MASTER &&
8035 (conn->le_conn_min_interval != min ||
8036 conn->le_conn_max_interval != max ||
8037 conn->le_conn_latency != latency ||
8038 conn->le_supv_timeout != timeout))
8039 hci_cmd_sync_queue(hdev, conn_update_sync,
8040 hci_param, NULL);
8041 }
8042 }
8043
8044 hci_dev_unlock(hdev);
8045
8046 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
8047 NULL, 0);
8048 }
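
/* A worked example of the length check in load_conn_param(), assuming
 * the packed mgmt.h layout (7-byte mgmt_addr_info plus four __le16
 * fields, i.e. 15 bytes per mgmt_conn_param, after a 2-byte
 * param_count): a load of two entries must arrive as exactly
 * struct_size(cp, params, 2) = 2 + 2 * 15 = 32 bytes; anything else
 * is rejected with MGMT_STATUS_INVALID_PARAMS.
 */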
8049
8050 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
8051 void *data, u16 len)
8052 {
8053 struct mgmt_cp_set_external_config *cp = data;
8054 bool changed;
8055 int err;
8056
8057 bt_dev_dbg(hdev, "sock %p", sk);
8058
8059 if (hdev_is_powered(hdev))
8060 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8061 MGMT_STATUS_REJECTED);
8062
8063 if (cp->config != 0x00 && cp->config != 0x01)
8064 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8065 MGMT_STATUS_INVALID_PARAMS);
8066
8067 if (!hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG))
8068 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8069 MGMT_STATUS_NOT_SUPPORTED);
8070
8071 hci_dev_lock(hdev);
8072
8073 if (cp->config)
8074 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
8075 else
8076 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
8077
8078 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
8079 if (err < 0)
8080 goto unlock;
8081
8082 if (!changed)
8083 goto unlock;
8084
8085 err = new_options(hdev, sk);
8086
8087 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
8088 mgmt_index_removed(hdev);
8089
8090 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
8091 hci_dev_set_flag(hdev, HCI_CONFIG);
8092 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8093
8094 queue_work(hdev->req_workqueue, &hdev->power_on);
8095 } else {
8096 set_bit(HCI_RAW, &hdev->flags);
8097 mgmt_index_added(hdev);
8098 }
8099 }
8100
8101 unlock:
8102 hci_dev_unlock(hdev);
8103 return err;
8104 }
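
/* The reconfiguration dance above keeps the userspace index lists
 * coherent: when toggling HCI_EXT_CONFIGURED flips the controller
 * between the configured and unconfigured states, the index is first
 * removed and then either powered back up through HCI_CONFIG /
 * HCI_AUTO_OFF or re-announced raw on the unconfigured list.
 */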
8105
8106 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
8107 void *data, u16 len)
8108 {
8109 struct mgmt_cp_set_public_address *cp = data;
8110 bool changed;
8111 int err;
8112
8113 bt_dev_dbg(hdev, "sock %p", sk);
8114
8115 if (hdev_is_powered(hdev))
8116 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8117 MGMT_STATUS_REJECTED);
8118
8119 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
8120 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8121 MGMT_STATUS_INVALID_PARAMS);
8122
8123 if (!hdev->set_bdaddr)
8124 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8125 MGMT_STATUS_NOT_SUPPORTED);
8126
8127 hci_dev_lock(hdev);
8128
8129 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
8130 bacpy(&hdev->public_addr, &cp->bdaddr);
8131
8132 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8133 if (err < 0)
8134 goto unlock;
8135
8136 if (!changed)
8137 goto unlock;
8138
8139 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8140 err = new_options(hdev, sk);
8141
8142 if (is_configured(hdev)) {
8143 mgmt_index_removed(hdev);
8144
8145 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8146
8147 hci_dev_set_flag(hdev, HCI_CONFIG);
8148 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8149
8150 queue_work(hdev->req_workqueue, &hdev->power_on);
8151 }
8152
8153 unlock:
8154 hci_dev_unlock(hdev);
8155 return err;
8156 }
8157
8158 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8159 int err)
8160 {
8161 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8162 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8163 u8 *h192, *r192, *h256, *r256;
8164 struct mgmt_pending_cmd *cmd = data;
8165 struct sk_buff *skb = cmd->skb;
8166 u8 status = mgmt_status(err);
8167 u16 eir_len;
8168
8169 if (!status) {
8170 if (!skb)
8171 status = MGMT_STATUS_FAILED;
8172 else if (IS_ERR(skb))
8173 status = mgmt_status(PTR_ERR(skb));
8174 else
8175 status = mgmt_status(skb->data[0]);
8176 }
8177
8178 bt_dev_dbg(hdev, "status %u", status);
8179
8180 mgmt_cp = cmd->param;
8181
8182 if (status) {
8183 status = mgmt_status(status);
8184 eir_len = 0;
8185
8186 h192 = NULL;
8187 r192 = NULL;
8188 h256 = NULL;
8189 r256 = NULL;
8190 } else if (!bredr_sc_enabled(hdev)) {
8191 struct hci_rp_read_local_oob_data *rp;
8192
8193 if (skb->len != sizeof(*rp)) {
8194 status = MGMT_STATUS_FAILED;
8195 eir_len = 0;
8196 } else {
8197 status = MGMT_STATUS_SUCCESS;
8198 rp = (void *)skb->data;
8199
8200 eir_len = 5 + 18 + 18;
8201 h192 = rp->hash;
8202 r192 = rp->rand;
8203 h256 = NULL;
8204 r256 = NULL;
8205 }
8206 } else {
8207 struct hci_rp_read_local_oob_ext_data *rp;
8208
8209 if (skb->len != sizeof(*rp)) {
8210 status = MGMT_STATUS_FAILED;
8211 eir_len = 0;
8212 } else {
8213 status = MGMT_STATUS_SUCCESS;
8214 rp = (void *)skb->data;
8215
8216 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8217 eir_len = 5 + 18 + 18;
8218 h192 = NULL;
8219 r192 = NULL;
8220 } else {
8221 eir_len = 5 + 18 + 18 + 18 + 18;
8222 h192 = rp->hash192;
8223 r192 = rp->rand192;
8224 }
8225
8226 h256 = rp->hash256;
8227 r256 = rp->rand256;
8228 }
8229 }
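
/* The eir_len constants used above decompose as follows: 5 is a
 * 2-byte EIR header (length + type) plus the 3-byte Class of Device,
 * and each 18 is a 2-byte EIR header plus a 16-byte SSP hash or
 * randomizer value, so e.g. 5 + 18 + 18 covers the class plus the
 * C192/R192 pair.
 */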
8230
8231 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8232 if (!mgmt_rp)
8233 goto done;
8234
8235 if (eir_len == 0)
8236 goto send_rsp;
8237
8238 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8239 hdev->dev_class, 3);
8240
8241 if (h192 && r192) {
8242 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8243 EIR_SSP_HASH_C192, h192, 16);
8244 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8245 EIR_SSP_RAND_R192, r192, 16);
8246 }
8247
8248 if (h256 && r256) {
8249 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8250 EIR_SSP_HASH_C256, h256, 16);
8251 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8252 EIR_SSP_RAND_R256, r256, 16);
8253 }
8254
8255 send_rsp:
8256 mgmt_rp->type = mgmt_cp->type;
8257 mgmt_rp->eir_len = cpu_to_le16(eir_len);
8258
8259 err = mgmt_cmd_complete(cmd->sk, hdev->id,
8260 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8261 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8262 if (err < 0 || status)
8263 goto done;
8264
8265 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8266
8267 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8268 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8269 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8270 done:
8271 if (skb && !IS_ERR(skb))
8272 kfree_skb(skb);
8273
8274 kfree(mgmt_rp);
8275 mgmt_pending_free(cmd);
8276 }
8277
8278 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8279 struct mgmt_cp_read_local_oob_ext_data *cp)
8280 {
8281 struct mgmt_pending_cmd *cmd;
8282 int err;
8283
8284 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8285 cp, sizeof(*cp));
8286 if (!cmd)
8287 return -ENOMEM;
8288
8289 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8290 read_local_oob_ext_data_complete);
8291
8292 if (err < 0) {
8293 mgmt_pending_remove(cmd);
8294 return err;
8295 }
8296
8297 return 0;
8298 }
8299
8300 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8301 void *data, u16 data_len)
8302 {
8303 struct mgmt_cp_read_local_oob_ext_data *cp = data;
8304 struct mgmt_rp_read_local_oob_ext_data *rp;
8305 size_t rp_len;
8306 u16 eir_len;
8307 u8 status, flags, role, addr[7], hash[16], rand[16];
8308 int err;
8309
8310 bt_dev_dbg(hdev, "sock %p", sk);
8311
8312 if (hdev_is_powered(hdev)) {
8313 switch (cp->type) {
8314 case BIT(BDADDR_BREDR):
8315 status = mgmt_bredr_support(hdev);
8316 if (status)
8317 eir_len = 0;
8318 else
8319 eir_len = 5;
8320 break;
8321 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8322 status = mgmt_le_support(hdev);
8323 if (status)
8324 eir_len = 0;
8325 else
8326 eir_len = 9 + 3 + 18 + 18 + 3;
8327 break;
8328 default:
8329 status = MGMT_STATUS_INVALID_PARAMS;
8330 eir_len = 0;
8331 break;
8332 }
8333 } else {
8334 status = MGMT_STATUS_NOT_POWERED;
8335 eir_len = 0;
8336 }
8337
8338 rp_len = sizeof(*rp) + eir_len;
8339 rp = kmalloc(rp_len, GFP_ATOMIC);
8340 if (!rp)
8341 return -ENOMEM;
8342
8343 if (!status && !lmp_ssp_capable(hdev)) {
8344 status = MGMT_STATUS_NOT_SUPPORTED;
8345 eir_len = 0;
8346 }
8347
8348 if (status)
8349 goto complete;
8350
8351 hci_dev_lock(hdev);
8352
8353 eir_len = 0;
8354 switch (cp->type) {
8355 case BIT(BDADDR_BREDR):
8356 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8357 err = read_local_ssp_oob_req(hdev, sk, cp);
8358 hci_dev_unlock(hdev);
8359 if (!err)
8360 goto done;
8361
8362 status = MGMT_STATUS_FAILED;
8363 goto complete;
8364 } else {
8365 eir_len = eir_append_data(rp->eir, eir_len,
8366 EIR_CLASS_OF_DEV,
8367 hdev->dev_class, 3);
8368 }
8369 break;
8370 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8371 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8372 smp_generate_oob(hdev, hash, rand) < 0) {
8373 hci_dev_unlock(hdev);
8374 status = MGMT_STATUS_FAILED;
8375 goto complete;
8376 }
8377
8378 /* This should return the active RPA, but since the RPA
8379 * is only programmed on demand, it is really hard to fill
8380 * this in at the moment. For now disallow retrieving
8381 * local out-of-band data when privacy is in use.
8382 *
8383 * Returning the identity address will not help here since
8384 * pairing happens before the identity resolving key is
8385 * known and thus the connection establishment happens
8386 * based on the RPA and not the identity address.
8387 */
8388 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8389 hci_dev_unlock(hdev);
8390 status = MGMT_STATUS_REJECTED;
8391 goto complete;
8392 }
8393
8394 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8395 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8396 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8397 bacmp(&hdev->static_addr, BDADDR_ANY))) {
8398 memcpy(addr, &hdev->static_addr, 6);
8399 addr[6] = 0x01;
8400 } else {
8401 memcpy(addr, &hdev->bdaddr, 6);
8402 addr[6] = 0x00;
8403 }
8404
8405 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8406 addr, sizeof(addr));
8407
8408 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8409 role = 0x02;
8410 else
8411 role = 0x01;
8412
8413 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8414 &role, sizeof(role));
8415
8416 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8417 eir_len = eir_append_data(rp->eir, eir_len,
8418 EIR_LE_SC_CONFIRM,
8419 hash, sizeof(hash));
8420
8421 eir_len = eir_append_data(rp->eir, eir_len,
8422 EIR_LE_SC_RANDOM,
8423 rand, sizeof(rand));
8424 }
8425
8426 flags = mgmt_get_adv_discov_flags(hdev);
8427
8428 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8429 flags |= LE_AD_NO_BREDR;
8430
8431 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8432 &flags, sizeof(flags));
8433 break;
8434 }
8435
8436 hci_dev_unlock(hdev);
8437
8438 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8439
8440 status = MGMT_STATUS_SUCCESS;
8441
8442 complete:
8443 rp->type = cp->type;
8444 rp->eir_len = cpu_to_le16(eir_len);
8445
8446 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8447 status, rp, sizeof(*rp) + eir_len);
8448 if (err < 0 || status)
8449 goto done;
8450
8451 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8452 rp, sizeof(*rp) + eir_len,
8453 HCI_MGMT_OOB_DATA_EVENTS, sk);
8454
8455 done:
8456 kfree(rp);
8457
8458 return err;
8459 }
8460
8461 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8462 {
8463 u32 flags = 0;
8464
8465 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8466 flags |= MGMT_ADV_FLAG_DISCOV;
8467 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8468 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8469 flags |= MGMT_ADV_FLAG_APPEARANCE;
8470 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8471 flags |= MGMT_ADV_PARAM_DURATION;
8472 flags |= MGMT_ADV_PARAM_TIMEOUT;
8473 flags |= MGMT_ADV_PARAM_INTERVALS;
8474 flags |= MGMT_ADV_PARAM_TX_POWER;
8475 flags |= MGMT_ADV_PARAM_SCAN_RSP;
8476
8477 /* In extended adv TX_POWER returned from Set Adv Param
8478 * will always be valid.
8479 */
8480 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8481 flags |= MGMT_ADV_FLAG_TX_POWER;
8482
8483 if (ext_adv_capable(hdev)) {
8484 flags |= MGMT_ADV_FLAG_SEC_1M;
8485 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8486 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8487
8488 if (le_2m_capable(hdev))
8489 flags |= MGMT_ADV_FLAG_SEC_2M;
8490
8491 if (le_coded_capable(hdev))
8492 flags |= MGMT_ADV_FLAG_SEC_CODED;
8493 }
8494
8495 return flags;
8496 }
8497
8498 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8499 void *data, u16 data_len)
8500 {
8501 struct mgmt_rp_read_adv_features *rp;
8502 size_t rp_len;
8503 int err;
8504 struct adv_info *adv_instance;
8505 u32 supported_flags;
8506 u8 *instance;
8507
8508 bt_dev_dbg(hdev, "sock %p", sk);
8509
8510 if (!lmp_le_capable(hdev))
8511 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8512 MGMT_STATUS_REJECTED);
8513
8514 hci_dev_lock(hdev);
8515
8516 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8517 rp = kmalloc(rp_len, GFP_ATOMIC);
8518 if (!rp) {
8519 hci_dev_unlock(hdev);
8520 return -ENOMEM;
8521 }
8522
8523 supported_flags = get_supported_adv_flags(hdev);
8524
8525 rp->supported_flags = cpu_to_le32(supported_flags);
8526 rp->max_adv_data_len = max_adv_len(hdev);
8527 rp->max_scan_rsp_len = max_adv_len(hdev);
8528 rp->max_instances = hdev->le_num_of_adv_sets;
8529 rp->num_instances = hdev->adv_instance_cnt;
8530
8531 instance = rp->instance;
8532 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8533 /* Only instances 1-le_num_of_adv_sets are externally visible */
8534 if (adv_instance->instance <= hdev->adv_instance_cnt) {
8535 *instance = adv_instance->instance;
8536 instance++;
8537 } else {
8538 rp->num_instances--;
8539 rp_len--;
8540 }
8541 }
8542
8543 hci_dev_unlock(hdev);
8544
8545 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8546 MGMT_STATUS_SUCCESS, rp, rp_len);
8547
8548 kfree(rp);
8549
8550 return err;
8551 }
8552
8553 static u8 calculate_name_len(struct hci_dev *hdev)
8554 {
8555 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8556
8557 return eir_append_local_name(hdev, buf, 0);
8558 }
8559
8560 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8561 bool is_adv_data)
8562 {
8563 u8 max_len = max_adv_len(hdev);
8564
8565 if (is_adv_data) {
8566 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8567 MGMT_ADV_FLAG_LIMITED_DISCOV |
8568 MGMT_ADV_FLAG_MANAGED_FLAGS))
8569 max_len -= 3;
8570
8571 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8572 max_len -= 3;
8573 } else {
8574 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8575 max_len -= calculate_name_len(hdev);
8576
8577 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8578 max_len -= 4;
8579 }
8580
8581 return max_len;
8582 }
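
/* A worked example for tlv_data_max_len(): on a controller limited to
 * legacy advertising, max_adv_len() is the 31-byte AD payload, so
 * requesting kernel-managed flags (a 3-byte Flags field) together
 * with MGMT_ADV_FLAG_TX_POWER (another 3 bytes) leaves
 * 31 - 3 - 3 = 25 bytes for caller-supplied advertising data.
 */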
8583
8584 static bool flags_managed(u32 adv_flags)
8585 {
8586 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8587 MGMT_ADV_FLAG_LIMITED_DISCOV |
8588 MGMT_ADV_FLAG_MANAGED_FLAGS);
8589 }
8590
8591 static bool tx_power_managed(u32 adv_flags)
8592 {
8593 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8594 }
8595
8596 static bool name_managed(u32 adv_flags)
8597 {
8598 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8599 }
8600
8601 static bool appearance_managed(u32 adv_flags)
8602 {
8603 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8604 }
8605
8606 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8607 u8 len, bool is_adv_data)
8608 {
8609 int i, cur_len;
8610 u8 max_len;
8611
8612 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8613
8614 if (len > max_len)
8615 return false;
8616
8617 /* Make sure that the data is correctly formatted. */
8618 for (i = 0; i < len; i += (cur_len + 1)) {
8619 cur_len = data[i];
8620
8621 if (!cur_len)
8622 continue;
8623
8624 if (data[i + 1] == EIR_FLAGS &&
8625 (!is_adv_data || flags_managed(adv_flags)))
8626 return false;
8627
8628 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8629 return false;
8630
8631 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8632 return false;
8633
8634 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8635 return false;
8636
8637 if (data[i + 1] == EIR_APPEARANCE &&
8638 appearance_managed(adv_flags))
8639 return false;
8640
8641 /* If the current field length would exceed the total data
8642 * length, then it's invalid.
8643 */
8644 if (i + cur_len >= len)
8645 return false;
8646 }
8647
8648 return true;
8649 }
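
/* The validation loop above walks standard AD structures, where each
 * element is [length][type][payload] and the length byte counts the
 * type plus payload. For example { 0x02, EIR_FLAGS, 0x06 } advances
 * the index by cur_len + 1 = 3, and is rejected here in scan response
 * data or whenever the Flags field is kernel-managed.
 */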
8650
8651 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8652 {
8653 u32 supported_flags, phy_flags;
8654
8655 /* The current implementation only supports a subset of the specified
8656 * flags. Also need to check mutual exclusiveness of sec flags.
8657 */
8658 supported_flags = get_supported_adv_flags(hdev);
8659 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8660 if (adv_flags & ~supported_flags ||
8661 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8662 return false;
8663
8664 return true;
8665 }
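
/* phy_flags & -phy_flags isolates the lowest set bit, so the XOR in
 * the check above is non-zero exactly when more than one secondary
 * PHY flag is set. E.g. requesting MGMT_ADV_FLAG_SEC_1M |
 * MGMT_ADV_FLAG_SEC_2M fails this at-most-one-bit test and the flags
 * are rejected as invalid.
 */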
8666
8667 static bool adv_busy(struct hci_dev *hdev)
8668 {
8669 return pending_find(MGMT_OP_SET_LE, hdev);
8670 }
8671
8672 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8673 int err)
8674 {
8675 struct adv_info *adv, *n;
8676
8677 bt_dev_dbg(hdev, "err %d", err);
8678
8679 hci_dev_lock(hdev);
8680
8681 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8682 u8 instance;
8683
8684 if (!adv->pending)
8685 continue;
8686
8687 if (!err) {
8688 adv->pending = false;
8689 continue;
8690 }
8691
8692 instance = adv->instance;
8693
8694 if (hdev->cur_adv_instance == instance)
8695 cancel_adv_timeout(hdev);
8696
8697 hci_remove_adv_instance(hdev, instance);
8698 mgmt_advertising_removed(sk, hdev, instance);
8699 }
8700
8701 hci_dev_unlock(hdev);
8702 }
8703
8704 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8705 {
8706 struct mgmt_pending_cmd *cmd = data;
8707 struct mgmt_cp_add_advertising *cp = cmd->param;
8708 struct mgmt_rp_add_advertising rp;
8709
8710 memset(&rp, 0, sizeof(rp));
8711
8712 rp.instance = cp->instance;
8713
8714 if (err)
8715 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
8716 mgmt_status(err));
8717 else
8718 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
8719 mgmt_status(err), &rp, sizeof(rp));
8720
8721 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8722
8723 mgmt_pending_free(cmd);
8724 }
8725
8726 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8727 {
8728 struct mgmt_pending_cmd *cmd = data;
8729 struct mgmt_cp_add_advertising *cp = cmd->param;
8730
8731 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8732 }
8733
8734 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8735 void *data, u16 data_len)
8736 {
8737 struct mgmt_cp_add_advertising *cp = data;
8738 struct mgmt_rp_add_advertising rp;
8739 u32 flags;
8740 u8 status;
8741 u16 timeout, duration;
8742 unsigned int prev_instance_cnt;
8743 u8 schedule_instance = 0;
8744 struct adv_info *adv, *next_instance;
8745 int err;
8746 struct mgmt_pending_cmd *cmd;
8747
8748 bt_dev_dbg(hdev, "sock %p", sk);
8749
8750 status = mgmt_le_support(hdev);
8751 if (status)
8752 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8753 status);
8754
8755 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8756 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8757 MGMT_STATUS_INVALID_PARAMS);
8758
8759 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8760 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8761 MGMT_STATUS_INVALID_PARAMS);
8762
8763 flags = __le32_to_cpu(cp->flags);
8764 timeout = __le16_to_cpu(cp->timeout);
8765 duration = __le16_to_cpu(cp->duration);
8766
8767 if (!requested_adv_flags_are_valid(hdev, flags))
8768 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8769 MGMT_STATUS_INVALID_PARAMS);
8770
8771 hci_dev_lock(hdev);
8772
8773 if (timeout && !hdev_is_powered(hdev)) {
8774 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8775 MGMT_STATUS_REJECTED);
8776 goto unlock;
8777 }
8778
8779 if (adv_busy(hdev)) {
8780 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8781 MGMT_STATUS_BUSY);
8782 goto unlock;
8783 }
8784
8785 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8786 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8787 cp->scan_rsp_len, false)) {
8788 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8789 MGMT_STATUS_INVALID_PARAMS);
8790 goto unlock;
8791 }
8792
8793 prev_instance_cnt = hdev->adv_instance_cnt;
8794
8795 adv = hci_add_adv_instance(hdev, cp->instance, flags,
8796 cp->adv_data_len, cp->data,
8797 cp->scan_rsp_len,
8798 cp->data + cp->adv_data_len,
8799 timeout, duration,
8800 HCI_ADV_TX_POWER_NO_PREFERENCE,
8801 hdev->le_adv_min_interval,
8802 hdev->le_adv_max_interval, 0);
8803 if (IS_ERR(adv)) {
8804 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8805 MGMT_STATUS_FAILED);
8806 goto unlock;
8807 }
8808
8809 /* Only trigger an advertising added event if a new instance was
8810 * actually added.
8811 */
8812 if (hdev->adv_instance_cnt > prev_instance_cnt)
8813 mgmt_advertising_added(sk, hdev, cp->instance);
8814
8815 if (hdev->cur_adv_instance == cp->instance) {
8816 /* If the currently advertised instance is being changed then
8817 * cancel the current advertising and schedule the next
8818 * instance. If there is only one instance then the overridden
8819 * advertising data will be visible right away.
8820 */
8821 cancel_adv_timeout(hdev);
8822
8823 next_instance = hci_get_next_instance(hdev, cp->instance);
8824 if (next_instance)
8825 schedule_instance = next_instance->instance;
8826 } else if (!hdev->adv_instance_timeout) {
8827 /* Immediately advertise the new instance if no other
8828 * instance is currently being advertised.
8829 */
8830 schedule_instance = cp->instance;
8831 }
8832
8833 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8834 * there is no instance to be advertised then we have no HCI
8835 * communication to make. Simply return.
8836 */
8837 if (!hdev_is_powered(hdev) ||
8838 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8839 !schedule_instance) {
8840 rp.instance = cp->instance;
8841 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8842 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8843 goto unlock;
8844 }
8845
8846 /* We're good to go, update advertising data, parameters, and start
8847 * advertising.
8848 */
8849 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8850 data_len);
8851 if (!cmd) {
8852 err = -ENOMEM;
8853 goto unlock;
8854 }
8855
8856 cp->instance = schedule_instance;
8857
8858 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8859 add_advertising_complete);
8860 if (err < 0)
8861 mgmt_pending_free(cmd);
8862
8863 unlock:
8864 hci_dev_unlock(hdev);
8865
8866 return err;
8867 }
8868
8869 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8870 int err)
8871 {
8872 struct mgmt_pending_cmd *cmd = data;
8873 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8874 struct mgmt_rp_add_ext_adv_params rp;
8875 struct adv_info *adv;
8876 u32 flags;
8877
8878 BT_DBG("%s", hdev->name);
8879
8880 hci_dev_lock(hdev);
8881
8882 adv = hci_find_adv_instance(hdev, cp->instance);
8883 if (!adv)
8884 goto unlock;
8885
8886 rp.instance = cp->instance;
8887 rp.tx_power = adv->tx_power;
8888
8889 /* While we're at it, inform userspace of the available space for this
8890 * advertisement, given the flags that will be used.
8891 */
8892 flags = __le32_to_cpu(cp->flags);
8893 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8894 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8895
8896 if (err) {
8897 /* If this advertisement was previously advertising and we
8898 * failed to update it, we signal that it has been removed and
8899 * delete its structure.
8900 */
8901 if (!adv->pending)
8902 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8903
8904 hci_remove_adv_instance(hdev, cp->instance);
8905
8906 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
8907 mgmt_status(err));
8908 } else {
8909 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
8910 mgmt_status(err), &rp, sizeof(rp));
8911 }
8912
8913 unlock:
8914 mgmt_pending_free(cmd);
8915
8916 hci_dev_unlock(hdev);
8917 }
8918
8919 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8920 {
8921 struct mgmt_pending_cmd *cmd = data;
8922 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8923
8924 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8925 }
8926
8927 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8928 void *data, u16 data_len)
8929 {
8930 struct mgmt_cp_add_ext_adv_params *cp = data;
8931 struct mgmt_rp_add_ext_adv_params rp;
8932 struct mgmt_pending_cmd *cmd = NULL;
8933 struct adv_info *adv;
8934 u32 flags, min_interval, max_interval;
8935 u16 timeout, duration;
8936 u8 status;
8937 s8 tx_power;
8938 int err;
8939
8940 BT_DBG("%s", hdev->name);
8941
8942 status = mgmt_le_support(hdev);
8943 if (status)
8944 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8945 status);
8946
8947 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8948 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8949 MGMT_STATUS_INVALID_PARAMS);
8950
8951 /* The purpose of breaking add_advertising into two separate MGMT calls
8952 * for params and data is to allow more parameters to be added to this
8953 * structure in the future. For this reason, we verify that we have the
8954 * bare minimum structure we know of when the interface was defined. Any
8955 * extra parameters we don't know about will be ignored in this request.
8956 */
8957 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8958 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8959 MGMT_STATUS_INVALID_PARAMS);
8960
8961 flags = __le32_to_cpu(cp->flags);
8962
8963 if (!requested_adv_flags_are_valid(hdev, flags))
8964 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8965 MGMT_STATUS_INVALID_PARAMS);
8966
8967 hci_dev_lock(hdev);
8968
8969 /* The new interface requires the controller to be powered to register */
8970 if (!hdev_is_powered(hdev)) {
8971 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8972 MGMT_STATUS_REJECTED);
8973 goto unlock;
8974 }
8975
8976 if (adv_busy(hdev)) {
8977 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8978 MGMT_STATUS_BUSY);
8979 goto unlock;
8980 }
8981
8982 /* Parse defined parameters from request, use defaults otherwise */
8983 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8984 __le16_to_cpu(cp->timeout) : 0;
8985
8986 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8987 __le16_to_cpu(cp->duration) :
8988 hdev->def_multi_adv_rotation_duration;
8989
8990 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8991 __le32_to_cpu(cp->min_interval) :
8992 hdev->le_adv_min_interval;
8993
8994 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8995 __le32_to_cpu(cp->max_interval) :
8996 hdev->le_adv_max_interval;
8997
8998 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8999 cp->tx_power :
9000 HCI_ADV_TX_POWER_NO_PREFERENCE;
9001
9002 /* Create advertising instance with no advertising or response data */
9003 adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
9004 timeout, duration, tx_power, min_interval,
9005 max_interval, 0);
9006
9007 if (IS_ERR(adv)) {
9008 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9009 MGMT_STATUS_FAILED);
9010 goto unlock;
9011 }
9012
9013 /* Submit request for advertising params if ext adv available */
9014 if (ext_adv_capable(hdev)) {
9015 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
9016 data, data_len);
9017 if (!cmd) {
9018 err = -ENOMEM;
9019 hci_remove_adv_instance(hdev, cp->instance);
9020 goto unlock;
9021 }
9022
9023 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
9024 add_ext_adv_params_complete);
9025 if (err < 0)
9026 mgmt_pending_free(cmd);
9027 } else {
9028 rp.instance = cp->instance;
9029 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
9030 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9031 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9032 err = mgmt_cmd_complete(sk, hdev->id,
9033 MGMT_OP_ADD_EXT_ADV_PARAMS,
9034 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9035 }
9036
9037 unlock:
9038 hci_dev_unlock(hdev);
9039
9040 return err;
9041 }
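
/* A sketch of the intended split flow (not the only order userspace
 * may use): issue Add Ext Adv Params first, so the kernel can report
 * the selected tx_power and the remaining data budget for the chosen
 * flags, then follow up with Add Ext Adv Data for the same instance.
 * Any parameter whose MGMT_ADV_PARAM_* flag is absent falls back to
 * the hdev defaults resolved above.
 */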
9042
9043 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
9044 {
9045 struct mgmt_pending_cmd *cmd = data;
9046 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
9047 struct mgmt_rp_add_advertising rp;
9048
9049 add_adv_complete(hdev, cmd->sk, cp->instance, err);
9050
9051 memset(&rp, 0, sizeof(rp));
9052
9053 rp.instance = cp->instance;
9054
9055 if (err)
9056 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
9057 mgmt_status(err));
9058 else
9059 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
9060 mgmt_status(err), &rp, sizeof(rp));
9061
9062 mgmt_pending_free(cmd);
9063 }
9064
9065 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
9066 {
9067 struct mgmt_pending_cmd *cmd = data;
9068 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
9069 int err;
9070
9071 if (ext_adv_capable(hdev)) {
9072 err = hci_update_adv_data_sync(hdev, cp->instance);
9073 if (err)
9074 return err;
9075
9076 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
9077 if (err)
9078 return err;
9079
9080 return hci_enable_ext_advertising_sync(hdev, cp->instance);
9081 }
9082
9083 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
9084 }
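
/* For extended advertising the ordering above matters: advertising
 * data and scan response data are programmed into the controller
 * first and the set is enabled only afterwards, so the instance never
 * goes on air with stale or empty payloads.
 */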
9085
9086 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
9087 u16 data_len)
9088 {
9089 struct mgmt_cp_add_ext_adv_data *cp = data;
9090 struct mgmt_rp_add_ext_adv_data rp;
9091 u8 schedule_instance = 0;
9092 struct adv_info *next_instance;
9093 struct adv_info *adv_instance;
9094 int err = 0;
9095 struct mgmt_pending_cmd *cmd;
9096
9097 BT_DBG("%s", hdev->name);
9098
9099 hci_dev_lock(hdev);
9100
9101 adv_instance = hci_find_adv_instance(hdev, cp->instance);
9102
9103 if (!adv_instance) {
9104 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9105 MGMT_STATUS_INVALID_PARAMS);
9106 goto unlock;
9107 }
9108
9109 /* The new interface requires the controller to be powered to register */
9110 if (!hdev_is_powered(hdev)) {
9111 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9112 MGMT_STATUS_REJECTED);
9113 goto clear_new_instance;
9114 }
9115
9116 if (adv_busy(hdev)) {
9117 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9118 MGMT_STATUS_BUSY);
9119 goto clear_new_instance;
9120 }
9121
9122 /* Validate new data */
9123 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
9124 cp->adv_data_len, true) ||
9125 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
9126 cp->adv_data_len, cp->scan_rsp_len, false)) {
9127 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9128 MGMT_STATUS_INVALID_PARAMS);
9129 goto clear_new_instance;
9130 }
9131
9132 /* Set the data in the advertising instance */
9133 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
9134 cp->data, cp->scan_rsp_len,
9135 cp->data + cp->adv_data_len);
9136
9137 /* If using software rotation, determine next instance to use */
9138 if (hdev->cur_adv_instance == cp->instance) {
9139 /* If the currently advertised instance is being changed
9140 * then cancel the current advertising and schedule the
9141 * next instance. If there is only one instance then the
9142 * overridden advertising data will be visible right
9143 * away
9144 */
9145 cancel_adv_timeout(hdev);
9146
9147 next_instance = hci_get_next_instance(hdev, cp->instance);
9148 if (next_instance)
9149 schedule_instance = next_instance->instance;
9150 } else if (!hdev->adv_instance_timeout) {
9151 /* Immediately advertise the new instance if no other
9152 * instance is currently being advertised.
9153 */
9154 schedule_instance = cp->instance;
9155 }
9156
9157 /* If the HCI_ADVERTISING flag is set or there is no instance to
9158 * be advertised then we have no HCI communication to make.
9159 * Simply return.
9160 */
9161 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
9162 if (adv_instance->pending) {
9163 mgmt_advertising_added(sk, hdev, cp->instance);
9164 adv_instance->pending = false;
9165 }
9166 rp.instance = cp->instance;
9167 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9168 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9169 goto unlock;
9170 }
9171
9172 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9173 data_len);
9174 if (!cmd) {
9175 err = -ENOMEM;
9176 goto clear_new_instance;
9177 }
9178
9179 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
9180 add_ext_adv_data_complete);
9181 if (err < 0) {
9182 mgmt_pending_free(cmd);
9183 goto clear_new_instance;
9184 }
9185
9186 /* We were successful in updating the data, so trigger the
9187 * advertising_added event if this is an instance that wasn't previously
9188 * advertising. If a failure occurs in the requests we initiated, we
9189 * will remove the instance again in add_advertising_complete.
9190 */
9191 if (adv_instance->pending)
9192 mgmt_advertising_added(sk, hdev, cp->instance);
9193
9194 goto unlock;
9195
9196 clear_new_instance:
9197 hci_remove_adv_instance(hdev, cp->instance);
9198
9199 unlock:
9200 hci_dev_unlock(hdev);
9201
9202 return err;
9203 }
9204
9205 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9206 int err)
9207 {
9208 struct mgmt_pending_cmd *cmd = data;
9209 struct mgmt_cp_remove_advertising *cp = cmd->param;
9210 struct mgmt_rp_remove_advertising rp;
9211
9212 bt_dev_dbg(hdev, "err %d", err);
9213
9214 memset(&rp, 0, sizeof(rp));
9215 rp.instance = cp->instance;
9216
9217 if (err)
9218 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
9219 mgmt_status(err));
9220 else
9221 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
9222 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9223
9224 mgmt_pending_free(cmd);
9225 }
9226
9227 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9228 {
9229 struct mgmt_pending_cmd *cmd = data;
9230 struct mgmt_cp_remove_advertising *cp = cmd->param;
9231 int err;
9232
9233 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9234 if (err)
9235 return err;
9236
9237 if (list_empty(&hdev->adv_instances))
9238 err = hci_disable_advertising_sync(hdev);
9239
9240 return err;
9241 }
9242
9243 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9244 void *data, u16 data_len)
9245 {
9246 struct mgmt_cp_remove_advertising *cp = data;
9247 struct mgmt_pending_cmd *cmd;
9248 int err;
9249
9250 bt_dev_dbg(hdev, "sock %p", sk);
9251
9252 hci_dev_lock(hdev);
9253
9254 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9255 err = mgmt_cmd_status(sk, hdev->id,
9256 MGMT_OP_REMOVE_ADVERTISING,
9257 MGMT_STATUS_INVALID_PARAMS);
9258 goto unlock;
9259 }
9260
9261 if (pending_find(MGMT_OP_SET_LE, hdev)) {
9262 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9263 MGMT_STATUS_BUSY);
9264 goto unlock;
9265 }
9266
9267 if (list_empty(&hdev->adv_instances)) {
9268 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9269 MGMT_STATUS_INVALID_PARAMS);
9270 goto unlock;
9271 }
9272
9273 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9274 data_len);
9275 if (!cmd) {
9276 err = -ENOMEM;
9277 goto unlock;
9278 }
9279
9280 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
9281 remove_advertising_complete);
9282 if (err < 0)
9283 mgmt_pending_free(cmd);
9284
9285 unlock:
9286 hci_dev_unlock(hdev);
9287
9288 return err;
9289 }
9290
9291 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9292 void *data, u16 data_len)
9293 {
9294 struct mgmt_cp_get_adv_size_info *cp = data;
9295 struct mgmt_rp_get_adv_size_info rp;
9296 u32 flags, supported_flags;
9297
9298 bt_dev_dbg(hdev, "sock %p", sk);
9299
9300 if (!lmp_le_capable(hdev))
9301 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9302 MGMT_STATUS_REJECTED);
9303
9304 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9305 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9306 MGMT_STATUS_INVALID_PARAMS);
9307
9308 flags = __le32_to_cpu(cp->flags);
9309
9310 /* The current implementation only supports a subset of the specified
9311 * flags.
9312 */
9313 supported_flags = get_supported_adv_flags(hdev);
9314 if (flags & ~supported_flags)
9315 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9316 MGMT_STATUS_INVALID_PARAMS);
9317
9318 rp.instance = cp->instance;
9319 rp.flags = cp->flags;
9320 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9321 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9322
9323 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9324 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9325 }
9326
9327 static const struct hci_mgmt_handler mgmt_handlers[] = {
9328 { NULL }, /* 0x0000 (no command) */
9329 { read_version, MGMT_READ_VERSION_SIZE,
9330 HCI_MGMT_NO_HDEV |
9331 HCI_MGMT_UNTRUSTED },
9332 { read_commands, MGMT_READ_COMMANDS_SIZE,
9333 HCI_MGMT_NO_HDEV |
9334 HCI_MGMT_UNTRUSTED },
9335 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
9336 HCI_MGMT_NO_HDEV |
9337 HCI_MGMT_UNTRUSTED },
9338 { read_controller_info, MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered, MGMT_SETTING_SIZE },
	{ set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable, MGMT_SETTING_SIZE },
	{ set_fast_connectable, MGMT_SETTING_SIZE },
	{ set_bondable, MGMT_SETTING_SIZE },
	{ set_link_security, MGMT_SETTING_SIZE },
	{ set_ssp, MGMT_SETTING_SIZE },
	{ set_hs, MGMT_SETTING_SIZE },
	{ set_le, MGMT_SETTING_SIZE },
	{ set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid, MGMT_ADD_UUID_SIZE },
	{ remove_uuid, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect, MGMT_DISCONNECT_SIZE },
	{ get_connections, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name, MGMT_CONFIRM_NAME_SIZE },
	{ block_device, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising, MGMT_SETTING_SIZE },
	{ set_bredr, MGMT_SETTING_SIZE },
	{ set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn, MGMT_SETTING_SIZE },
	{ set_debug_keys, MGMT_SETTING_SIZE },
	{ set_privacy, MGMT_SET_PRIVACY_SIZE },
	{ load_irks, MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device, MGMT_ADD_DEVICE_SIZE },
	{ remove_device, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising, MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info, MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance, MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech, MGMT_SETTING_SIZE },
	{ read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor, MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send, MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
	{ mgmt_hci_cmd_sync, MGMT_HCI_CMD_SYNC_SIZE, HCI_MGMT_VAR_LEN },
};

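/* Each mgmt_handlers[] entry pairs a command handler with the minimum
 * accepted parameter size, plus optional dispatch flags. Roughly (the
 * authoritative dispatch lives in hci_sock.c): HCI_MGMT_VAR_LEN accepts
 * parameters longer than the stated size, HCI_MGMT_UNTRUSTED exposes
 * the command to untrusted control sockets, HCI_MGMT_NO_HDEV and
 * HCI_MGMT_HDEV_OPTIONAL control whether a controller index must (or
 * may) be supplied, and HCI_MGMT_UNCONFIGURED allows the command on a
 * controller that has not been configured yet. Illustrative entry
 * (hypothetical names):
 *
 *	{ my_handler, MGMT_MY_CMD_SIZE, HCI_MGMT_VAR_LEN },
 */
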
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
		return;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}

void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };

	if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
		return;

	mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
	cancel_delayed_work_sync(&hdev->mesh_send_done);
}

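/* A sketch of the power-state flow implemented below, assuming a single
 * pending command: mgmt_power_on() and __mgmt_power_off() complete any
 * queued MGMT_OP_SET_POWERED request via settings_rsp() and then
 * broadcast the result with new_settings(), roughly:
 *
 *	-> MGMT_OP_SET_POWERED (val = 0x01)
 *	<- Command Complete with the current settings
 *	<- MGMT_EV_NEW_SETTINGS to the other mgmt sockets
 */
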
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}

void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
			     &match);

	/* If the power off is because of hdev unregistration use the
	 * appropriate INVALID_INDEX status. Otherwise use NOT_POWERED.
	 * We cover both scenarios here since later in mgmt_index_removed()
	 * any hci_conn callbacks will have already been triggered,
	 * potentially causing misleading DISCONNECTED status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
	else
		match.mgmt_status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct mgmt_pending_cmd *cmd;
	u8 status;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

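/* For the New Link Key / New Long Term Key style events below,
 * store_hint tells userspace whether the key is worth persisting
 * (0x01) or should only be used for the current connection (0x00);
 * see doc/mgmt-api.txt in the BlueZ tree for the event definitions.
 */
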
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_RESPONDER:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}

	return MGMT_LTK_UNAUTHENTICATED;
}

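/* Note on the random-address checks below: for LE random addresses the
 * two most significant bits of the most significant octet encode the
 * sub-type (0b11 static, 0b01 resolvable private, 0b00 non-resolvable
 * private). The test
 *
 *	(bdaddr.b[5] & 0xc0) != 0xc0
 *
 * therefore matches the private (non-static) kinds, whose keys are not
 * worth storing since the address will change.
 */
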
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't need their
	 * long term keys stored. Their addresses will change the next
	 * time around.
	 *
	 * Only when a remote device provides an identity address is
	 * the long term key stored. If the remote identity is known,
	 * the long term keys are internally mapped to the identity
	 * address. So allow static random and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't need their
	 * signature resolving keys stored. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address is
	 * the signature resolving key stored. So allow static random
	 * and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

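/* The variable part of the Device Connected event uses the standard EIR
 * encoding: a sequence of [length][type][data...] structures where the
 * length octet covers the type and data. An illustrative complete-name
 * field for the (made-up) name "dev":
 *
 *	0x04 0x09 'd' 'e' 'v'
 *	  |    |
 *	  |    +-- EIR_NAME_COMPLETE (0x09)
 *	  +------- length: 1 type octet + 3 data octets
 */
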
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* Allocate a buffer sized for either the LE advertising data or
	 * the BR/EDR name and class of device.
	 */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) +
				     (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE,
						    name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class,
						    sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}

static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
}

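/* Returns true if a power-down is already in progress, either because
 * HCI_POWERING_DOWN is set or because a Set Powered (off) command is
 * still pending.
 */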
bool mgmt_powering_down(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
		return true;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK &&
	    link_type != LE_LINK &&
	    link_type != BIS_LINK)
		return;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, true,
			     unpair_device_rsp, hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
					 conn->dst_type, status, true);
		return;
	}

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

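/* The user confirm and passkey request events below hand Secure Simple
 * Pairing interaction over to userspace. Summarizing doc/mgmt-api.txt:
 * a confirm_hint of 0x01 asks the agent for a plain yes/no confirmation
 * without showing the value, while 0x00 means the value should be
 * displayed for numeric comparison with the remote device.
 */
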
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, false, sk_lookup,
			     &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, false, sk_lookup,
			     &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, false, sk_lookup,
			     &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS,
				   NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this name change is a side effect of the controller
		 * powering on or off, don't send any mgmt signals.
		 */
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
			return;

		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}

static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}

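/* eir_has_uuids() walks the EIR structures and matches advertised 16-,
 * 32- and 128-bit service UUIDs against the filter list. The shorter
 * forms are first expanded by splicing them into the Bluetooth Base
 * UUID (0000xxxx-0000-1000-8000-00805F9B34FB); for example the 16-bit
 * Heart Rate service UUID 0x180D becomes
 *
 *	0000180d-0000-1000-8000-00805f9b34fb
 *
 * before the memcmp() against the 128-bit filter entries.
 */
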
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}

static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If an RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * an RSSI smaller than the RSSI threshold will be dropped. If the
	 * quirk is set, let it through for further processing, as we might
	 * need to restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in the filter, results
		 * with no matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER)) {
		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}

void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.monitor_handle = cpu_to_le16(handle);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}

static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}

static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}

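/* mesh_device_found() only forwards advertising reports whose AD
 * structures match one of the AD types registered via Set Mesh
 * Receiver; hdev->mesh_ad_types acts as an allow-list terminated by a
 * zero entry. A typical mesh setup would register, for instance, the
 * Mesh Message (0x2a) and Mesh Beacon (0x2b) AD types, though any
 * values may be configured.
 */
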
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}

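/* In mgmt_device_found() the skb is sized with 5 spare bytes so that a
 * Class of Device field can be appended in EIR form when the report
 * itself lacks one: 1 length octet + 1 type octet (EIR_CLASS_OF_DEV) +
 * 3 octets of CoD.
 */
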
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for discovery that was not initiated by the
	 * kernel. One LE exception is when pend_le_reports > 0, in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);

			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}

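/* Reports the result of a remote name request as a Device Found event.
 * When the lookup failed (name == NULL) the event carries the
 * MGMT_DEV_FOUND_NAME_REQUEST_FAILED flag instead of an EIR name field.
 */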
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) +
			     (name ? eir_precalc_len(name_len) : 0));
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name,
					    name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}

void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	} else {
		memset(&ev.addr, 0, sizeof(ev.addr));
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}

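/* The control channel registered below is what userspace binds to in
 * order to reach the handlers above. A minimal, illustrative userspace
 * sketch (error handling omitted):
 *
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = HCI_DEV_NONE,
 *		.hci_channel = HCI_CHANNEL_CONTROL,
 *	};
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	// mgmt commands are then written to, and events read from, fd
 */
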
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};

int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}

void mgmt_cleanup(struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, true);
		} while (mesh_tx);
	}

	read_unlock(&hci_dev_list_lock);
}