/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012  Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <linux/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
#include "aosp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	23

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
	MGMT_OP_HCI_CMD_SYNC,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};

static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};

#define CACHE_TIMEOUT	secs_to_jiffies(2)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
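
/* Map a kernel errno (as returned by the hci_sync machinery) onto the
 * closest MGMT status code. Anything without an explicit mapping falls
 * back to MGMT_STATUS_FAILED.
 */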
static u8 mgmt_errno_status(int err)
{
	switch (err) {
	case 0:
		return MGMT_STATUS_SUCCESS;
	case -EPERM:
		return MGMT_STATUS_REJECTED;
	case -EINVAL:
		return MGMT_STATUS_INVALID_PARAMS;
	case -EOPNOTSUPP:
		return MGMT_STATUS_NOT_SUPPORTED;
	case -EBUSY:
		return MGMT_STATUS_BUSY;
	case -ETIMEDOUT:
		return MGMT_STATUS_AUTH_FAILED;
	case -ENOMEM:
		return MGMT_STATUS_NO_RESOURCES;
	case -EISCONN:
		return MGMT_STATUS_ALREADY_CONNECTED;
	case -ENOTCONN:
		return MGMT_STATUS_DISCONNECTED;
	}

	return MGMT_STATUS_FAILED;
}
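
/* Convert an error into an MGMT status code: negative values are treated
 * as kernel errnos, non-negative values are looked up in the HCI status
 * conversion table above.
 */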
static u8 mgmt_status(int err)
{
	if (err < 0)
		return mgmt_errno_status(err);

	if (err < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[err];

	return MGMT_STATUS_FAILED;
}

static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}

static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;
	else
		return ADDR_LE_DEV_RANDOM;
}

void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
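
/* Reply with the list of supported commands and events. Untrusted
 * sockets only see the read-only subset declared in
 * mgmt_untrusted_commands/mgmt_untrusted_events.
 */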
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}

static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
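
/* A controller is considered configured once any required external
 * configuration has completed and, where the quirks demand it, a valid
 * public address has been set.
 */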
static bool is_configured(struct hci_dev *hdev)
{
	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if ((hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) ||
	     hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) ||
	     hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}

static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}

static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
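
/* BR/EDR 1M 1-slot and the LE 1M PHYs are mandatory and always enabled,
 * so they are excluded from the configurable set.
 */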
static u32 get_configurable_phys(struct hci_dev *hdev)
{
	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}

static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev))
			settings |= MGMT_SETTING_SSP;

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (hci_test_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) || hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	if (ll_privacy_capable(hdev))
		settings |= MGMT_SETTING_LL_PRIVACY;

	if (past_sender_capable(hdev))
		settings |= MGMT_SETTING_PAST_SENDER;

	if (past_receiver_capable(hdev))
		settings |= MGMT_SETTING_PAST_RECEIVER;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then whether
	 * the address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_enabled(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_enabled(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_enabled(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_enabled(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	if (ll_privacy_enabled(hdev))
		settings |= MGMT_SETTING_LL_PRIVACY;

	if (past_sender_enabled(hdev))
		settings |= MGMT_SETTING_PAST_SENDER;

	if (past_receiver_enabled(hdev))
		settings |= MGMT_SETTING_PAST_RECEIVER;

	return settings;
}

static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}

bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}

static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}

static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}

static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}

static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data);

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
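
/* Finish a mesh transmission: optionally notify user space with
 * MGMT_EV_MESH_PACKET_CMPLT and drop the pending TX entry.
 */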
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}

static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	if (list_empty(&hdev->adv_instances))
		hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}

static int mesh_send_sync(struct hci_dev *hdev, void *data);
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}

static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}
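
/* One-time setup performed when the first mgmt command arrives for a
 * controller: register the delayed work handlers and mark the device
 * as mgmt-controlled.
 */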
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
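
/* Build the EIR payload used by Read Extended Controller Information:
 * class of device (BR/EDR), appearance (LE) and the complete and short
 * local names.
 */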
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}

static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}

static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}

void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}
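
/* Cancel a pending advertising-instance timeout, if one is armed */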
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for the AUTO_OFF case where the device might not
		 * "really" have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}

static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_settings for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_free(cmd);
}

static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode cp;

	mutex_lock(&hdev->mgmt_pending_lock);

	/* Make sure cmd still outstanding. */
	if (!__mgmt_pending_listed(hdev, cmd)) {
		mutex_unlock(&hdev->mgmt_pending_lock);
		return -ECANCELED;
	}

	memcpy(&cp, cmd->param, sizeof(cp));

	mutex_unlock(&hdev->mgmt_pending_lock);

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp.val);
}

static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!cp->val) {
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
					      MGMT_STATUS_BUSY);
			goto failed;
		}
	}

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
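
/* Context shared by the pending-command response helpers below */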
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, *status);
}

static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	/* dequeue cmd_sync entries using cmd as data as that is about to be
	 * removed/freed.
	 */
	hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);

	if (cmd->cmd_complete) {
		cmd->cmd_complete(cmd, match->mgmt_status);
		return;
	}

	cmd_status_rsp(cmd, data);
}

static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
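
/* Check whether BR/EDR commands may proceed: distinguish a controller
 * that lacks BR/EDR entirely from one where BR/EDR is disabled.
 */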
mgmt_bredr_support(struct hci_dev * hdev)1514 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1515 {
1516 if (!lmp_bredr_capable(hdev))
1517 return MGMT_STATUS_NOT_SUPPORTED;
1518 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1519 return MGMT_STATUS_REJECTED;
1520 else
1521 return MGMT_STATUS_SUCCESS;
1522 }
1523
mgmt_le_support(struct hci_dev * hdev)1524 static u8 mgmt_le_support(struct hci_dev *hdev)
1525 {
1526 if (!lmp_le_capable(hdev))
1527 return MGMT_STATUS_NOT_SUPPORTED;
1528 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1529 return MGMT_STATUS_REJECTED;
1530 else
1531 return MGMT_STATUS_SUCCESS;
1532 }
1533
mgmt_set_discoverable_complete(struct hci_dev * hdev,void * data,int err)1534 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1535 int err)
1536 {
1537 struct mgmt_pending_cmd *cmd = data;
1538
1539 bt_dev_dbg(hdev, "err %d", err);
1540
1541 /* Make sure cmd still outstanding. */
1542 if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
1543 return;
1544
1545 hci_dev_lock(hdev);
1546
1547 if (err) {
1548 u8 mgmt_err = mgmt_status(err);
1549 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
1550 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1551 goto done;
1552 }
1553
1554 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1555 hdev->discov_timeout > 0) {
1556 int to = secs_to_jiffies(hdev->discov_timeout);
1557 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1558 }
1559
1560 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1561 new_settings(hdev, cmd->sk);
1562
1563 done:
1564 mgmt_pending_free(cmd);
1565 hci_dev_unlock(hdev);
1566 }
1567
set_discoverable_sync(struct hci_dev * hdev,void * data)1568 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1569 {
1570 if (!mgmt_pending_listed(hdev, data))
1571 return -ECANCELED;
1572
1573 BT_DBG("%s", hdev->name);
1574
1575 return hci_update_discoverable_sync(hdev);
1576 }
1577
set_discoverable(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)1578 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1579 u16 len)
1580 {
1581 struct mgmt_cp_set_discoverable *cp = data;
1582 struct mgmt_pending_cmd *cmd;
1583 u16 timeout;
1584 int err;
1585
1586 bt_dev_dbg(hdev, "sock %p", sk);
1587
1588 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1589 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1590 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1591 MGMT_STATUS_REJECTED);
1592
1593 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1594 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1595 MGMT_STATUS_INVALID_PARAMS);
1596
1597 timeout = __le16_to_cpu(cp->timeout);
1598
1599 /* Disabling discoverable requires that no timeout is set,
1600 * and enabling limited discoverable requires a timeout.
1601 */
1602 if ((cp->val == 0x00 && timeout > 0) ||
1603 (cp->val == 0x02 && timeout == 0))
1604 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1605 MGMT_STATUS_INVALID_PARAMS);
1606
1607 hci_dev_lock(hdev);
1608
1609 if (!hdev_is_powered(hdev) && timeout > 0) {
1610 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1611 MGMT_STATUS_NOT_POWERED);
1612 goto failed;
1613 }
1614
1615 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1616 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1617 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1618 MGMT_STATUS_BUSY);
1619 goto failed;
1620 }
1621
1622 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1623 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1624 MGMT_STATUS_REJECTED);
1625 goto failed;
1626 }
1627
1628 if (hdev->advertising_paused) {
1629 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1630 MGMT_STATUS_BUSY);
1631 goto failed;
1632 }
1633
1634 if (!hdev_is_powered(hdev)) {
1635 bool changed = false;
1636
1637 /* Setting limited discoverable when powered off is
1638 * not a valid operation since it requires a timeout
1639 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1640 */
1641 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1642 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1643 changed = true;
1644 }
1645
1646 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1647 if (err < 0)
1648 goto failed;
1649
1650 if (changed)
1651 err = new_settings(hdev, sk);
1652
1653 goto failed;
1654 }
1655
1656 /* If the current mode is the same, then just update the timeout
1657 * value with the new value. And if only the timeout gets updated,
1658 * then no need for any HCI transactions.
1659 */
1660 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1661 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1662 HCI_LIMITED_DISCOVERABLE)) {
1663 cancel_delayed_work(&hdev->discov_off);
1664 hdev->discov_timeout = timeout;
1665
1666 if (cp->val && hdev->discov_timeout > 0) {
1667 int to = secs_to_jiffies(hdev->discov_timeout);
1668 queue_delayed_work(hdev->req_workqueue,
1669 &hdev->discov_off, to);
1670 }
1671
1672 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1673 goto failed;
1674 }
1675
1676 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1677 if (!cmd) {
1678 err = -ENOMEM;
1679 goto failed;
1680 }
1681
1682 /* Cancel any potential discoverable timeout that might be
1683 * still active and store new timeout value. The arming of
1684 * the timeout happens in the complete handler.
1685 */
1686 cancel_delayed_work(&hdev->discov_off);
1687 hdev->discov_timeout = timeout;
1688
1689 if (cp->val)
1690 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1691 else
1692 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1693
1694 /* Limited discoverable mode */
1695 if (cp->val == 0x02)
1696 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1697 else
1698 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1699
1700 err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1701 mgmt_set_discoverable_complete);
1702
1703 if (err < 0)
1704 mgmt_pending_remove(cmd);
1705
1706 failed:
1707 hci_dev_unlock(hdev);
1708 return err;
1709 }
1710
mgmt_set_connectable_complete(struct hci_dev * hdev,void * data,int err)1711 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1712 int err)
1713 {
1714 struct mgmt_pending_cmd *cmd = data;
1715
1716 bt_dev_dbg(hdev, "err %d", err);
1717
1718 /* Make sure cmd still outstanding. */
1719 if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
1720 return;
1721
1722 hci_dev_lock(hdev);
1723
1724 if (err) {
1725 u8 mgmt_err = mgmt_status(err);
1726 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
1727 goto done;
1728 }
1729
1730 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1731 new_settings(hdev, cmd->sk);
1732
1733 done:
1734 mgmt_pending_free(cmd);
1735
1736 hci_dev_unlock(hdev);
1737 }
1738
set_connectable_update_settings(struct hci_dev * hdev,struct sock * sk,u8 val)1739 static int set_connectable_update_settings(struct hci_dev *hdev,
1740 struct sock *sk, u8 val)
1741 {
1742 bool changed = false;
1743 int err;
1744
1745 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1746 changed = true;
1747
1748 if (val) {
1749 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1750 } else {
1751 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1752 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1753 }
1754
1755 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1756 if (err < 0)
1757 return err;
1758
1759 if (changed) {
1760 hci_update_scan(hdev);
1761 hci_update_passive_scan(hdev);
1762 return new_settings(hdev, sk);
1763 }
1764
1765 return 0;
1766 }
1767
set_connectable_sync(struct hci_dev * hdev,void * data)1768 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1769 {
1770 if (!mgmt_pending_listed(hdev, data))
1771 return -ECANCELED;
1772
1773 BT_DBG("%s", hdev->name);
1774
1775 return hci_update_connectable_sync(hdev);
1776 }
1777
set_connectable(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)1778 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1779 u16 len)
1780 {
1781 struct mgmt_mode *cp = data;
1782 struct mgmt_pending_cmd *cmd;
1783 int err;
1784
1785 bt_dev_dbg(hdev, "sock %p", sk);
1786
1787 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1788 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1789 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1790 MGMT_STATUS_REJECTED);
1791
1792 if (cp->val != 0x00 && cp->val != 0x01)
1793 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1794 MGMT_STATUS_INVALID_PARAMS);
1795
1796 hci_dev_lock(hdev);
1797
1798 if (!hdev_is_powered(hdev)) {
1799 err = set_connectable_update_settings(hdev, sk, cp->val);
1800 goto failed;
1801 }
1802
1803 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1804 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1805 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1806 MGMT_STATUS_BUSY);
1807 goto failed;
1808 }
1809
1810 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1811 if (!cmd) {
1812 err = -ENOMEM;
1813 goto failed;
1814 }
1815
1816 if (cp->val) {
1817 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1818 } else {
1819 if (hdev->discov_timeout > 0)
1820 cancel_delayed_work(&hdev->discov_off);
1821
1822 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1823 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1824 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1825 }
1826
1827 err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1828 mgmt_set_connectable_complete);
1829
1830 if (err < 0)
1831 mgmt_pending_remove(cmd);
1832
1833 failed:
1834 hci_dev_unlock(hdev);
1835 return err;
1836 }
1837
1838 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1839 u16 len)
1840 {
1841 struct mgmt_mode *cp = data;
1842 bool changed;
1843 int err;
1844
1845 bt_dev_dbg(hdev, "sock %p", sk);
1846
1847 if (cp->val != 0x00 && cp->val != 0x01)
1848 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1849 MGMT_STATUS_INVALID_PARAMS);
1850
1851 hci_dev_lock(hdev);
1852
1853 if (cp->val)
1854 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1855 else
1856 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1857
1858 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1859 if (err < 0)
1860 goto unlock;
1861
1862 if (changed) {
1863 /* In limited privacy mode the change of bondable mode
1864 * may affect the local advertising address.
1865 */
1866 hci_update_discoverable(hdev);
1867
1868 err = new_settings(hdev, sk);
1869 }
1870
1871 unlock:
1872 hci_dev_unlock(hdev);
1873 return err;
1874 }
1875
1876 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1877 u16 len)
1878 {
1879 struct mgmt_mode *cp = data;
1880 struct mgmt_pending_cmd *cmd;
1881 u8 val, status;
1882 int err;
1883
1884 bt_dev_dbg(hdev, "sock %p", sk);
1885
1886 status = mgmt_bredr_support(hdev);
1887 if (status)
1888 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1889 status);
1890
1891 if (cp->val != 0x00 && cp->val != 0x01)
1892 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1893 MGMT_STATUS_INVALID_PARAMS);
1894
1895 hci_dev_lock(hdev);
1896
1897 if (!hdev_is_powered(hdev)) {
1898 bool changed = false;
1899
1900 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1901 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1902 changed = true;
1903 }
1904
1905 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1906 if (err < 0)
1907 goto failed;
1908
1909 if (changed)
1910 err = new_settings(hdev, sk);
1911
1912 goto failed;
1913 }
1914
1915 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1916 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1917 MGMT_STATUS_BUSY);
1918 goto failed;
1919 }
1920
1921 val = !!cp->val;
1922
1923 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1924 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1925 goto failed;
1926 }
1927
1928 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1929 if (!cmd) {
1930 err = -ENOMEM;
1931 goto failed;
1932 }
1933
1934 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1935 if (err < 0) {
1936 mgmt_pending_remove(cmd);
1937 goto failed;
1938 }
1939
1940 failed:
1941 hci_dev_unlock(hdev);
1942 return err;
1943 }
1944
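/* Completion callback for Set SSP: on failure revert HCI_SSP_ENABLED if
 * it was being enabled and notify sockets, otherwise sync the flag with
 * the requested value, respond to the pending command and refresh the
 * EIR data.
 */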
1945 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1946 {
1947 struct cmd_lookup match = { NULL, hdev };
1948 struct mgmt_pending_cmd *cmd = data;
1949 struct mgmt_mode *cp;
1950 u8 enable;
1951 bool changed;
1952
1953 /* Make sure cmd still outstanding. */
1954 if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
1955 return;
1956
1957 cp = cmd->param;
1958 enable = cp->val;
1959
1960 if (err) {
1961 u8 mgmt_err = mgmt_status(err);
1962
1963 if (enable && hci_dev_test_and_clear_flag(hdev,
1964 HCI_SSP_ENABLED)) {
1965 new_settings(hdev, NULL);
1966 }
1967
1968 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
1969 mgmt_pending_free(cmd);
1970 return;
1971 }
1972
1973 if (enable) {
1974 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1975 } else {
1976 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1977 }
1978
1979 settings_rsp(cmd, &match);
1980
1981 if (changed)
1982 new_settings(hdev, match.sk);
1983
1984 if (match.sk)
1985 sock_put(match.sk);
1986
1987 hci_update_eir_sync(hdev);
1988 mgmt_pending_free(cmd);
1989 }
1990
1991 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1992 {
1993 struct mgmt_pending_cmd *cmd = data;
1994 struct mgmt_mode cp;
1995 bool changed = false;
1996 int err;
1997
1998 mutex_lock(&hdev->mgmt_pending_lock);
1999
2000 if (!__mgmt_pending_listed(hdev, cmd)) {
2001 mutex_unlock(&hdev->mgmt_pending_lock);
2002 return -ECANCELED;
2003 }
2004
2005 memcpy(&cp, cmd->param, sizeof(cp));
2006
2007 mutex_unlock(&hdev->mgmt_pending_lock);
2008
2009 if (cp.val)
2010 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
2011
2012 err = hci_write_ssp_mode_sync(hdev, cp.val);
2013
2014 if (!err && changed)
2015 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
2016
2017 return err;
2018 }
2019
2020 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2021 {
2022 struct mgmt_mode *cp = data;
2023 struct mgmt_pending_cmd *cmd;
2024 u8 status;
2025 int err;
2026
2027 bt_dev_dbg(hdev, "sock %p", sk);
2028
2029 status = mgmt_bredr_support(hdev);
2030 if (status)
2031 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2032
2033 if (!lmp_ssp_capable(hdev))
2034 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2035 MGMT_STATUS_NOT_SUPPORTED);
2036
2037 if (cp->val != 0x00 && cp->val != 0x01)
2038 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2039 MGMT_STATUS_INVALID_PARAMS);
2040
2041 hci_dev_lock(hdev);
2042
2043 if (!hdev_is_powered(hdev)) {
2044 bool changed;
2045
2046 if (cp->val) {
2047 changed = !hci_dev_test_and_set_flag(hdev,
2048 HCI_SSP_ENABLED);
2049 } else {
2050 changed = hci_dev_test_and_clear_flag(hdev,
2051 HCI_SSP_ENABLED);
2052 }
2053
2054 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2055 if (err < 0)
2056 goto failed;
2057
2058 if (changed)
2059 err = new_settings(hdev, sk);
2060
2061 goto failed;
2062 }
2063
2064 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2065 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2066 MGMT_STATUS_BUSY);
2067 goto failed;
2068 }
2069
2070 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2071 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2072 goto failed;
2073 }
2074
2075 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2076 if (!cmd)
2077 err = -ENOMEM;
2078 else
2079 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2080 set_ssp_complete);
2081
2082 if (err < 0) {
2083 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2084 MGMT_STATUS_FAILED);
2085
2086 if (cmd)
2087 mgmt_pending_remove(cmd);
2088 }
2089
2090 failed:
2091 hci_dev_unlock(hdev);
2092 return err;
2093 }
2094
2095 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2096 {
2097 bt_dev_dbg(hdev, "sock %p", sk);
2098
2099 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2100 MGMT_STATUS_NOT_SUPPORTED);
2101 }
2102
2103 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2104 {
2105 struct mgmt_pending_cmd *cmd = data;
2106 struct cmd_lookup match = { NULL, hdev };
2107 u8 status = mgmt_status(err);
2108
2109 bt_dev_dbg(hdev, "err %d", err);
2110
2111 if (err == -ECANCELED || !mgmt_pending_valid(hdev, data))
2112 return;
2113
2114 if (status) {
2115 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, status);
2116 goto done;
2117 }
2118
2119 settings_rsp(cmd, &match);
2120
2121 new_settings(hdev, match.sk);
2122
2123 if (match.sk)
2124 sock_put(match.sk);
2125
2126 done:
2127 mgmt_pending_free(cmd);
2128 }
2129
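/* Synchronous worker for Set LE: snapshot the request under
 * mgmt_pending_lock, tear down advertising when disabling, write the LE
 * host support setting and, once LE is enabled, refresh the advertising
 * and scan response data.
 */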
2130 static int set_le_sync(struct hci_dev *hdev, void *data)
2131 {
2132 struct mgmt_pending_cmd *cmd = data;
2133 struct mgmt_mode cp;
2134 u8 val;
2135 int err;
2136
2137 mutex_lock(&hdev->mgmt_pending_lock);
2138
2139 if (!__mgmt_pending_listed(hdev, cmd)) {
2140 mutex_unlock(&hdev->mgmt_pending_lock);
2141 return -ECANCELED;
2142 }
2143
2144 memcpy(&cp, cmd->param, sizeof(cp));
2145 val = !!cp.val;
2146
2147 mutex_unlock(&hdev->mgmt_pending_lock);
2148
2149 if (!val) {
2150 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2151
2152 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2153 hci_disable_advertising_sync(hdev);
2154
2155 if (ext_adv_capable(hdev))
2156 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2157 } else {
2158 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2159 }
2160
2161 err = hci_write_le_host_supported_sync(hdev, val, 0);
2162
2163 /* Make sure the controller has a good default for
2164 * advertising data. Restrict the update to when LE
2165 * has actually been enabled. During power on, the
2166 * update in powered_update_hci will take care of it.
2167 */
2168 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2169 if (ext_adv_capable(hdev)) {
2170 int status;
2171
2172 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2173 if (!status)
2174 hci_update_scan_rsp_data_sync(hdev, 0x00);
2175 } else {
2176 hci_update_adv_data_sync(hdev, 0x00);
2177 hci_update_scan_rsp_data_sync(hdev, 0x00);
2178 }
2179
2180 hci_update_passive_scan(hdev);
2181 }
2182
2183 return err;
2184 }
2185
2186 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2187 {
2188 struct mgmt_pending_cmd *cmd = data;
2189 u8 status = mgmt_status(err);
2190 struct sock *sk;
2191
2192 if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
2193 return;
2194
2195 sk = cmd->sk;
2196
2197 if (status) {
2198 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2199 status);
2200 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, true,
2201 cmd_status_rsp, &status);
2202 goto done;
2203 }
2204
2205 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2206
2207 done:
2208 mgmt_pending_free(cmd);
2209 }
2210
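/* Synchronous worker for Set Mesh Receiver: snapshot the request under
 * mgmt_pending_lock, toggle HCI_MESH, apply the requested scan interval
 * and window, copy the AD type filters if they fit and re-program
 * passive scanning.
 */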
2211 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2212 {
2213 struct mgmt_pending_cmd *cmd = data;
2214 DEFINE_FLEX(struct mgmt_cp_set_mesh, cp, ad_types, num_ad_types,
2215 sizeof(hdev->mesh_ad_types));
2216 size_t len;
2217
2218 mutex_lock(&hdev->mgmt_pending_lock);
2219
2220 if (!__mgmt_pending_listed(hdev, cmd)) {
2221 mutex_unlock(&hdev->mgmt_pending_lock);
2222 return -ECANCELED;
2223 }
2224
2225 len = cmd->param_len;
2226 memcpy(cp, cmd->param, min(__struct_size(cp), len));
2227
2228 mutex_unlock(&hdev->mgmt_pending_lock);
2229
2230 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2231
2232 if (cp->enable)
2233 hci_dev_set_flag(hdev, HCI_MESH);
2234 else
2235 hci_dev_clear_flag(hdev, HCI_MESH);
2236
2237 hdev->le_scan_interval = __le16_to_cpu(cp->period);
2238 hdev->le_scan_window = __le16_to_cpu(cp->window);
2239
2240 len -= sizeof(struct mgmt_cp_set_mesh);
2241
2242 /* If filters don't fit, forward all adv pkts */
2243 if (len <= sizeof(hdev->mesh_ad_types))
2244 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2245
2246 hci_update_passive_scan_sync(hdev);
2247 return 0;
2248 }
2249
2250 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2251 {
2252 struct mgmt_cp_set_mesh *cp = data;
2253 struct mgmt_pending_cmd *cmd;
2254 __u16 period, window;
2255 int err = 0;
2256
2257 bt_dev_dbg(hdev, "sock %p", sk);
2258
2259 if (!lmp_le_capable(hdev) ||
2260 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2261 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2262 MGMT_STATUS_NOT_SUPPORTED);
2263
2264 if (cp->enable != 0x00 && cp->enable != 0x01)
2265 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2266 MGMT_STATUS_INVALID_PARAMS);
2267
2268 /* Keep allowed ranges in sync with set_scan_params() */
2269 period = __le16_to_cpu(cp->period);
2270
2271 if (period < 0x0004 || period > 0x4000)
2272 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2273 MGMT_STATUS_INVALID_PARAMS);
2274
2275 window = __le16_to_cpu(cp->window);
2276
2277 if (window < 0x0004 || window > 0x4000)
2278 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2279 MGMT_STATUS_INVALID_PARAMS);
2280
2281 if (window > period)
2282 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2283 MGMT_STATUS_INVALID_PARAMS);
2284
2285 hci_dev_lock(hdev);
2286
2287 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2288 if (!cmd)
2289 err = -ENOMEM;
2290 else
2291 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2292 set_mesh_complete);
2293
2294 if (err < 0) {
2295 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2296 MGMT_STATUS_FAILED);
2297
2298 if (cmd)
2299 mgmt_pending_remove(cmd);
2300 }
2301
2302 hci_dev_unlock(hdev);
2303 return err;
2304 }
2305
2306 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2307 {
2308 struct mgmt_mesh_tx *mesh_tx = data;
2309 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2310 unsigned long mesh_send_interval;
2311 u8 mgmt_err = mgmt_status(err);
2312
2313 /* Report any errors here, but don't report completion */
2314
2315 if (mgmt_err) {
2316 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2317 /* Send Complete Error Code for handle */
2318 mesh_send_complete(hdev, mesh_tx, false);
2319 return;
2320 }
2321
2322 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2323 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2324 mesh_send_interval);
2325 }
2326
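/* Transmit a mesh packet by installing it as a dedicated advertising
 * instance (one past the controller's advertising sets) and scheduling
 * it, unless another instance is already on air.
 */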
2327 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2328 {
2329 struct mgmt_mesh_tx *mesh_tx = data;
2330 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2331 struct adv_info *adv, *next_instance;
2332 u8 instance = hdev->le_num_of_adv_sets + 1;
2333 u16 timeout, duration;
2334 int err = 0;
2335
2336 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2337 return MGMT_STATUS_BUSY;
2338
2339 timeout = 1000;
2340 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2341 adv = hci_add_adv_instance(hdev, instance, 0,
2342 send->adv_data_len, send->adv_data,
2343 0, NULL,
2344 timeout, duration,
2345 HCI_ADV_TX_POWER_NO_PREFERENCE,
2346 hdev->le_adv_min_interval,
2347 hdev->le_adv_max_interval,
2348 mesh_tx->handle);
2349
2350 if (!IS_ERR(adv))
2351 mesh_tx->instance = instance;
2352 else
2353 err = PTR_ERR(adv);
2354
2355 if (hdev->cur_adv_instance == instance) {
2356 /* If the currently advertised instance is being changed then
2357 * cancel the current advertising and schedule the next
2358 * instance. If there is only one instance then the overridden
2359 * advertising data will be visible right away.
2360 */
2361 cancel_adv_timeout(hdev);
2362
2363 next_instance = hci_get_next_instance(hdev, instance);
2364 if (next_instance)
2365 instance = next_instance->instance;
2366 else
2367 instance = 0;
2368 } else if (hdev->adv_instance_timeout) {
2369 /* Immediately advertise the new instance if no other is active,
2370 * or let it come up naturally from the queue if advertising is
2370 * already in progress
2371 */
2372 instance = 0;
2373 }
2374
2375 if (instance)
2376 return hci_schedule_adv_instance_sync(hdev, instance, true);
2377
2378 return err;
2379 }
2380
2381 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2382 {
2383 struct mgmt_rp_mesh_read_features *rp = data;
2384
2385 if (rp->used_handles >= rp->max_handles)
2386 return;
2387
2388 rp->handles[rp->used_handles++] = mesh_tx->handle;
2389 }
2390
2391 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2392 void *data, u16 len)
2393 {
2394 struct mgmt_rp_mesh_read_features rp;
2395
2396 if (!lmp_le_capable(hdev) ||
2397 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2398 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2399 MGMT_STATUS_NOT_SUPPORTED);
2400
2401 memset(&rp, 0, sizeof(rp));
2402 rp.index = cpu_to_le16(hdev->id);
2403 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2404 rp.max_handles = MESH_HANDLES_MAX;
2405
2406 hci_dev_lock(hdev);
2407
2408 if (rp.max_handles)
2409 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2410
2411 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2412 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2413
2414 hci_dev_unlock(hdev);
2415 return 0;
2416 }
2417
2418 static int send_cancel(struct hci_dev *hdev, void *data)
2419 {
2420 struct mgmt_pending_cmd *cmd = data;
2421 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2422 struct mgmt_mesh_tx *mesh_tx;
2423
2424 if (!cancel->handle) {
2425 do {
2426 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2427
2428 if (mesh_tx)
2429 mesh_send_complete(hdev, mesh_tx, false);
2430 } while (mesh_tx);
2431 } else {
2432 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2433
2434 if (mesh_tx && mesh_tx->sk == cmd->sk)
2435 mesh_send_complete(hdev, mesh_tx, false);
2436 }
2437
2438 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2439 0, NULL, 0);
2440 mgmt_pending_free(cmd);
2441
2442 return 0;
2443 }
2444
2445 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2446 void *data, u16 len)
2447 {
2448 struct mgmt_pending_cmd *cmd;
2449 int err;
2450
2451 if (!lmp_le_capable(hdev) ||
2452 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2453 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2454 MGMT_STATUS_NOT_SUPPORTED);
2455
2456 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2457 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2458 MGMT_STATUS_REJECTED);
2459
2460 hci_dev_lock(hdev);
2461 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2462 if (!cmd)
2463 err = -ENOMEM;
2464 else
2465 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2466
2467 if (err < 0) {
2468 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2469 MGMT_STATUS_FAILED);
2470
2471 if (cmd)
2472 mgmt_pending_free(cmd);
2473 }
2474
2475 hci_dev_unlock(hdev);
2476 return err;
2477 }
2478
2479 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2480 {
2481 struct mgmt_mesh_tx *mesh_tx;
2482 struct mgmt_cp_mesh_send *send = data;
2483 struct mgmt_rp_mesh_read_features rp;
2484 bool sending;
2485 int err = 0;
2486
2487 if (!lmp_le_capable(hdev) ||
2488 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2489 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2490 MGMT_STATUS_NOT_SUPPORTED);
2491 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2492 len <= MGMT_MESH_SEND_SIZE ||
2493 len > (MGMT_MESH_SEND_SIZE + 31))
2494 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2495 MGMT_STATUS_REJECTED);
2496
2497 hci_dev_lock(hdev);
2498
2499 memset(&rp, 0, sizeof(rp));
2500 rp.max_handles = MESH_HANDLES_MAX;
2501
2502 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2503
2504 if (rp.max_handles <= rp.used_handles) {
2505 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2506 MGMT_STATUS_BUSY);
2507 goto done;
2508 }
2509
2510 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2511 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2512
2513 if (!mesh_tx)
2514 err = -ENOMEM;
2515 else if (!sending)
2516 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2517 mesh_send_start_complete);
2518
2519 if (err < 0) {
2520 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2521 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2522 MGMT_STATUS_FAILED);
2523
2524 if (mesh_tx) {
2525 if (sending)
2526 mgmt_mesh_remove(mesh_tx);
2527 }
2528 } else {
2529 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2530
2531 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2532 &mesh_tx->handle, 1);
2533 }
2534
2535 done:
2536 hci_dev_unlock(hdev);
2537 return err;
2538 }
2539
2540 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2541 {
2542 struct mgmt_mode *cp = data;
2543 struct mgmt_pending_cmd *cmd;
2544 int err;
2545 u8 val, enabled;
2546
2547 bt_dev_dbg(hdev, "sock %p", sk);
2548
2549 if (!lmp_le_capable(hdev))
2550 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2551 MGMT_STATUS_NOT_SUPPORTED);
2552
2553 if (cp->val != 0x00 && cp->val != 0x01)
2554 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2555 MGMT_STATUS_INVALID_PARAMS);
2556
2557 /* Bluetooth single-mode LE-only controllers, or dual-mode
2558 * controllers configured as LE-only devices, do not allow
2559 * switching LE off. These either have LE enabled explicitly
2560 * or have had BR/EDR switched off previously.
2561 *
2562 * When trying to enable LE while it is already enabled, gracefully
2563 * send a positive response. Trying to disable it, however, will
2564 * result in rejection.
2565 */
2566 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2567 if (cp->val == 0x01)
2568 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2569
2570 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2571 MGMT_STATUS_REJECTED);
2572 }
2573
2574 hci_dev_lock(hdev);
2575
2576 val = !!cp->val;
2577 enabled = lmp_host_le_capable(hdev);
2578
2579 if (!hdev_is_powered(hdev) || val == enabled) {
2580 bool changed = false;
2581
2582 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2583 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2584 changed = true;
2585 }
2586
2587 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2588 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2589 changed = true;
2590 }
2591
2592 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2593 if (err < 0)
2594 goto unlock;
2595
2596 if (changed)
2597 err = new_settings(hdev, sk);
2598
2599 goto unlock;
2600 }
2601
2602 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2603 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2604 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2605 MGMT_STATUS_BUSY);
2606 goto unlock;
2607 }
2608
2609 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2610 if (!cmd)
2611 err = -ENOMEM;
2612 else
2613 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2614 set_le_complete);
2615
2616 if (err < 0) {
2617 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2618 MGMT_STATUS_FAILED);
2619
2620 if (cmd)
2621 mgmt_pending_remove(cmd);
2622 }
2623
2624 unlock:
2625 hci_dev_unlock(hdev);
2626 return err;
2627 }
2628
2629 static int send_hci_cmd_sync(struct hci_dev *hdev, void *data)
2630 {
2631 struct mgmt_pending_cmd *cmd = data;
2632 struct mgmt_cp_hci_cmd_sync *cp = cmd->param;
2633 struct sk_buff *skb;
2634
2635 skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cp->opcode),
2636 le16_to_cpu(cp->params_len), cp->params,
2637 cp->event, cp->timeout ?
2638 secs_to_jiffies(cp->timeout) :
2639 HCI_CMD_TIMEOUT);
2640 if (IS_ERR(skb)) {
2641 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2642 mgmt_status(PTR_ERR(skb)));
2643 goto done;
2644 }
2645
2646 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC, 0,
2647 skb->data, skb->len);
2648
2649 kfree_skb(skb);
2650
2651 done:
2652 mgmt_pending_free(cmd);
2653
2654 return 0;
2655 }
2656
2657 static int mgmt_hci_cmd_sync(struct sock *sk, struct hci_dev *hdev,
2658 void *data, u16 len)
2659 {
2660 struct mgmt_cp_hci_cmd_sync *cp = data;
2661 struct mgmt_pending_cmd *cmd;
2662 int err;
2663
2664 if (len != (offsetof(struct mgmt_cp_hci_cmd_sync, params) +
2665 le16_to_cpu(cp->params_len)))
2666 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2667 MGMT_STATUS_INVALID_PARAMS);
2668
2669 hci_dev_lock(hdev);
2670 cmd = mgmt_pending_new(sk, MGMT_OP_HCI_CMD_SYNC, hdev, data, len);
2671 if (!cmd)
2672 err = -ENOMEM;
2673 else
2674 err = hci_cmd_sync_queue(hdev, send_hci_cmd_sync, cmd, NULL);
2675
2676 if (err < 0) {
2677 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2678 MGMT_STATUS_FAILED);
2679
2680 if (cmd)
2681 mgmt_pending_free(cmd);
2682 }
2683
2684 hci_dev_unlock(hdev);
2685 return err;
2686 }
2687
2688 /* This is a helper function to test for pending mgmt commands that can
2689 * cause CoD or EIR HCI commands. We can only allow one such pending
2690 * mgmt command at a time since otherwise we cannot easily track what
2691 * the current values are and will be, and based on that calculate if a
2692 * new HCI command needs to be sent and, if so, with what value.
2693 */
2694 static bool pending_eir_or_class(struct hci_dev *hdev)
2695 {
2696 struct mgmt_pending_cmd *cmd;
2697
2698 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2699 switch (cmd->opcode) {
2700 case MGMT_OP_ADD_UUID:
2701 case MGMT_OP_REMOVE_UUID:
2702 case MGMT_OP_SET_DEV_CLASS:
2703 case MGMT_OP_SET_POWERED:
2704 return true;
2705 }
2706 }
2707
2708 return false;
2709 }
2710
2711 static const u8 bluetooth_base_uuid[] = {
2712 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2713 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2714 };
2715
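/* Determine whether a UUID is 16, 32 or 128 bits wide by comparing it
 * against the Bluetooth Base UUID (stored above in little-endian order).
 */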
2716 static u8 get_uuid_size(const u8 *uuid)
2717 {
2718 u32 val;
2719
2720 if (memcmp(uuid, bluetooth_base_uuid, 12))
2721 return 128;
2722
2723 val = get_unaligned_le32(&uuid[12]);
2724 if (val > 0xffff)
2725 return 32;
2726
2727 return 16;
2728 }
2729
2730 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2731 {
2732 struct mgmt_pending_cmd *cmd = data;
2733
2734 bt_dev_dbg(hdev, "err %d", err);
2735
2736 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
2737 mgmt_status(err), hdev->dev_class, 3);
2738
2739 mgmt_pending_free(cmd);
2740 }
2741
2742 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2743 {
2744 int err;
2745
2746 err = hci_update_class_sync(hdev);
2747 if (err)
2748 return err;
2749
2750 return hci_update_eir_sync(hdev);
2751 }
2752
2753 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2754 {
2755 struct mgmt_cp_add_uuid *cp = data;
2756 struct mgmt_pending_cmd *cmd;
2757 struct bt_uuid *uuid;
2758 int err;
2759
2760 bt_dev_dbg(hdev, "sock %p", sk);
2761
2762 hci_dev_lock(hdev);
2763
2764 if (pending_eir_or_class(hdev)) {
2765 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2766 MGMT_STATUS_BUSY);
2767 goto failed;
2768 }
2769
2770 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2771 if (!uuid) {
2772 err = -ENOMEM;
2773 goto failed;
2774 }
2775
2776 memcpy(uuid->uuid, cp->uuid, 16);
2777 uuid->svc_hint = cp->svc_hint;
2778 uuid->size = get_uuid_size(cp->uuid);
2779
2780 list_add_tail(&uuid->list, &hdev->uuids);
2781
2782 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2783 if (!cmd) {
2784 err = -ENOMEM;
2785 goto failed;
2786 }
2787
2788 /* MGMT_OP_ADD_UUID doesn't require the adapter to be up/running, so
2789 * use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2790 */
2791 err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
2792 mgmt_class_complete);
2793 if (err < 0) {
2794 mgmt_pending_free(cmd);
2795 goto failed;
2796 }
2797
2798 failed:
2799 hci_dev_unlock(hdev);
2800 return err;
2801 }
2802
2803 static bool enable_service_cache(struct hci_dev *hdev)
2804 {
2805 if (!hdev_is_powered(hdev))
2806 return false;
2807
2808 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2809 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2810 CACHE_TIMEOUT);
2811 return true;
2812 }
2813
2814 return false;
2815 }
2816
2817 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2818 {
2819 int err;
2820
2821 err = hci_update_class_sync(hdev);
2822 if (err)
2823 return err;
2824
2825 return hci_update_eir_sync(hdev);
2826 }
2827
2828 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2829 u16 len)
2830 {
2831 struct mgmt_cp_remove_uuid *cp = data;
2832 struct mgmt_pending_cmd *cmd;
2833 struct bt_uuid *match, *tmp;
2834 static const u8 bt_uuid_any[] = {
2835 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2836 };
2837 int err, found;
2838
2839 bt_dev_dbg(hdev, "sock %p", sk);
2840
2841 hci_dev_lock(hdev);
2842
2843 if (pending_eir_or_class(hdev)) {
2844 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2845 MGMT_STATUS_BUSY);
2846 goto unlock;
2847 }
2848
2849 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2850 hci_uuids_clear(hdev);
2851
2852 if (enable_service_cache(hdev)) {
2853 err = mgmt_cmd_complete(sk, hdev->id,
2854 MGMT_OP_REMOVE_UUID,
2855 0, hdev->dev_class, 3);
2856 goto unlock;
2857 }
2858
2859 goto update_class;
2860 }
2861
2862 found = 0;
2863
2864 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2865 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2866 continue;
2867
2868 list_del(&match->list);
2869 kfree(match);
2870 found++;
2871 }
2872
2873 if (found == 0) {
2874 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2875 MGMT_STATUS_INVALID_PARAMS);
2876 goto unlock;
2877 }
2878
2879 update_class:
2880 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2881 if (!cmd) {
2882 err = -ENOMEM;
2883 goto unlock;
2884 }
2885
2886 /* MGMT_OP_REMOVE_UUID doesn't require the adapter to be up/running, so
2887 * use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2888 */
2889 err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
2890 mgmt_class_complete);
2891 if (err < 0)
2892 mgmt_pending_free(cmd);
2893
2894 unlock:
2895 hci_dev_unlock(hdev);
2896 return err;
2897 }
2898
2899 static int set_class_sync(struct hci_dev *hdev, void *data)
2900 {
2901 int err = 0;
2902
2903 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2904 cancel_delayed_work_sync(&hdev->service_cache);
2905 err = hci_update_eir_sync(hdev);
2906 }
2907
2908 if (err)
2909 return err;
2910
2911 return hci_update_class_sync(hdev);
2912 }
2913
2914 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2915 u16 len)
2916 {
2917 struct mgmt_cp_set_dev_class *cp = data;
2918 struct mgmt_pending_cmd *cmd;
2919 int err;
2920
2921 bt_dev_dbg(hdev, "sock %p", sk);
2922
2923 if (!lmp_bredr_capable(hdev))
2924 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2925 MGMT_STATUS_NOT_SUPPORTED);
2926
2927 hci_dev_lock(hdev);
2928
2929 if (pending_eir_or_class(hdev)) {
2930 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2931 MGMT_STATUS_BUSY);
2932 goto unlock;
2933 }
2934
2935 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2936 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2937 MGMT_STATUS_INVALID_PARAMS);
2938 goto unlock;
2939 }
2940
2941 hdev->major_class = cp->major;
2942 hdev->minor_class = cp->minor;
2943
2944 if (!hdev_is_powered(hdev)) {
2945 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2946 hdev->dev_class, 3);
2947 goto unlock;
2948 }
2949
2950 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2951 if (!cmd) {
2952 err = -ENOMEM;
2953 goto unlock;
2954 }
2955
2956 /* MGMT_OP_SET_DEV_CLASS doesn't require the adapter to be up/running, so
2957 * use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2958 */
2959 err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
2960 mgmt_class_complete);
2961 if (err < 0)
2962 mgmt_pending_free(cmd);
2963
2964 unlock:
2965 hci_dev_unlock(hdev);
2966 return err;
2967 }
2968
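/* Load Link Keys replaces the entire BR/EDR link key store: the payload
 * length is validated against the advertised key count, and blocked,
 * malformed and debug keys are skipped before the rest are added.
 */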
2969 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2970 u16 len)
2971 {
2972 struct mgmt_cp_load_link_keys *cp = data;
2973 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2974 sizeof(struct mgmt_link_key_info));
2975 u16 key_count, expected_len;
2976 bool changed;
2977 int i;
2978
2979 bt_dev_dbg(hdev, "sock %p", sk);
2980
2981 if (!lmp_bredr_capable(hdev))
2982 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2983 MGMT_STATUS_NOT_SUPPORTED);
2984
2985 key_count = __le16_to_cpu(cp->key_count);
2986 if (key_count > max_key_count) {
2987 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2988 key_count);
2989 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2990 MGMT_STATUS_INVALID_PARAMS);
2991 }
2992
2993 expected_len = struct_size(cp, keys, key_count);
2994 if (expected_len != len) {
2995 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2996 expected_len, len);
2997 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2998 MGMT_STATUS_INVALID_PARAMS);
2999 }
3000
3001 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
3002 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
3003 MGMT_STATUS_INVALID_PARAMS);
3004
3005 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
3006 key_count);
3007
3008 hci_dev_lock(hdev);
3009
3010 hci_link_keys_clear(hdev);
3011
3012 if (cp->debug_keys)
3013 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
3014 else
3015 changed = hci_dev_test_and_clear_flag(hdev,
3016 HCI_KEEP_DEBUG_KEYS);
3017
3018 if (changed)
3019 new_settings(hdev, NULL);
3020
3021 for (i = 0; i < key_count; i++) {
3022 struct mgmt_link_key_info *key = &cp->keys[i];
3023
3024 if (hci_is_blocked_key(hdev,
3025 HCI_BLOCKED_KEY_TYPE_LINKKEY,
3026 key->val)) {
3027 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
3028 &key->addr.bdaddr);
3029 continue;
3030 }
3031
3032 if (key->addr.type != BDADDR_BREDR) {
3033 bt_dev_warn(hdev,
3034 "Invalid link address type %u for %pMR",
3035 key->addr.type, &key->addr.bdaddr);
3036 continue;
3037 }
3038
3039 if (key->type > 0x08) {
3040 bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
3041 key->type, &key->addr.bdaddr);
3042 continue;
3043 }
3044
3045 /* Always ignore debug keys and require a new pairing if
3046 * the user wants to use them.
3047 */
3048 if (key->type == HCI_LK_DEBUG_COMBINATION)
3049 continue;
3050
3051 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
3052 key->type, key->pin_len, NULL);
3053 }
3054
3055 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
3056
3057 hci_dev_unlock(hdev);
3058
3059 return 0;
3060 }
3061
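/* Emit a Device Unpaired event to every mgmt socket except skip_sk. */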
3062 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
3063 u8 addr_type, struct sock *skip_sk)
3064 {
3065 struct mgmt_ev_device_unpaired ev;
3066
3067 bacpy(&ev.addr.bdaddr, bdaddr);
3068 ev.addr.type = addr_type;
3069
3070 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
3071 skip_sk);
3072 }
3073
3074 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
3075 {
3076 struct mgmt_pending_cmd *cmd = data;
3077 struct mgmt_cp_unpair_device *cp = cmd->param;
3078
3079 if (!err)
3080 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3081
3082 cmd->cmd_complete(cmd, err);
3083 mgmt_pending_free(cmd);
3084 }
3085
3086 static int unpair_device_sync(struct hci_dev *hdev, void *data)
3087 {
3088 struct mgmt_pending_cmd *cmd = data;
3089 struct mgmt_cp_unpair_device *cp = cmd->param;
3090 struct hci_conn *conn;
3091
3092 if (cp->addr.type == BDADDR_BREDR)
3093 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3094 &cp->addr.bdaddr);
3095 else
3096 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3097 le_addr_type(cp->addr.type));
3098
3099 if (!conn)
3100 return 0;
3101
3102 /* Disregard any possible error since the likes of hci_abort_conn_sync
3103 * will clean up the connection no matter the error.
3104 */
3105 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3106
3107 return 0;
3108 }
3109
3110 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3111 u16 len)
3112 {
3113 struct mgmt_cp_unpair_device *cp = data;
3114 struct mgmt_rp_unpair_device rp;
3115 struct hci_conn_params *params;
3116 struct mgmt_pending_cmd *cmd;
3117 struct hci_conn *conn;
3118 u8 addr_type;
3119 int err;
3120
3121 memset(&rp, 0, sizeof(rp));
3122 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3123 rp.addr.type = cp->addr.type;
3124
3125 if (!bdaddr_type_is_valid(cp->addr.type))
3126 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3127 MGMT_STATUS_INVALID_PARAMS,
3128 &rp, sizeof(rp));
3129
3130 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
3131 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3132 MGMT_STATUS_INVALID_PARAMS,
3133 &rp, sizeof(rp));
3134
3135 hci_dev_lock(hdev);
3136
3137 if (!hdev_is_powered(hdev)) {
3138 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3139 MGMT_STATUS_NOT_POWERED, &rp,
3140 sizeof(rp));
3141 goto unlock;
3142 }
3143
3144 if (cp->addr.type == BDADDR_BREDR) {
3145 /* If disconnection is requested, then look up the
3146 * connection. If the remote device is connected, it
* will later be used to terminate the link.
3148 *
3149 * Setting it to NULL explicitly will cause no
3150 * termination of the link.
3151 */
3152 if (cp->disconnect)
3153 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3154 &cp->addr.bdaddr);
3155 else
3156 conn = NULL;
3157
3158 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3159 if (err < 0) {
3160 err = mgmt_cmd_complete(sk, hdev->id,
3161 MGMT_OP_UNPAIR_DEVICE,
3162 MGMT_STATUS_NOT_PAIRED, &rp,
3163 sizeof(rp));
3164 goto unlock;
3165 }
3166
3167 goto done;
3168 }
3169
3170 /* LE address type */
3171 addr_type = le_addr_type(cp->addr.type);
3172
3173 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3174 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3175 if (err < 0) {
3176 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3177 MGMT_STATUS_NOT_PAIRED, &rp,
3178 sizeof(rp));
3179 goto unlock;
3180 }
3181
3182 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3183 if (!conn) {
3184 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3185 goto done;
3186 }
3187
3188
3189 /* Defer cleaning up the connection parameters until the connection
3190 * closes, to give a chance of keeping them if re-pairing happens.
3191 */
3192 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3193
3194 /* Disable auto-connection parameters if present */
3195 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3196 if (params) {
3197 if (params->explicit_connect)
3198 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3199 else
3200 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3201 }
3202
3203 /* If disconnection is not requested, then clear the connection
3204 * variable so that the link is not terminated.
3205 */
3206 if (!cp->disconnect)
3207 conn = NULL;
3208
3209 done:
3210 /* If the connection variable is set, then termination of the
3211 * link is requested.
3212 */
3213 if (!conn) {
3214 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3215 &rp, sizeof(rp));
3216 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3217 goto unlock;
3218 }
3219
3220 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3221 sizeof(*cp));
3222 if (!cmd) {
3223 err = -ENOMEM;
3224 goto unlock;
3225 }
3226
3227 cmd->cmd_complete = addr_cmd_complete;
3228
3229 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3230 unpair_device_complete);
3231 if (err < 0)
3232 mgmt_pending_free(cmd);
3233
3234 unlock:
3235 hci_dev_unlock(hdev);
3236 return err;
3237 }
3238
3239 static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
3240 {
3241 struct mgmt_pending_cmd *cmd = data;
3242
3243 cmd->cmd_complete(cmd, mgmt_status(err));
3244 mgmt_pending_free(cmd);
3245 }
3246
3247 static int disconnect_sync(struct hci_dev *hdev, void *data)
3248 {
3249 struct mgmt_pending_cmd *cmd = data;
3250 struct mgmt_cp_disconnect *cp = cmd->param;
3251 struct hci_conn *conn;
3252
3253 if (cp->addr.type == BDADDR_BREDR)
3254 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3255 &cp->addr.bdaddr);
3256 else
3257 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3258 le_addr_type(cp->addr.type));
3259
3260 if (!conn)
3261 return -ENOTCONN;
3262
3263 /* Disregard any possible error since the likes of hci_abort_conn_sync
3264 * will clean up the connection no matter the error.
3265 */
3266 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3267
3268 return 0;
3269 }
3270
3271 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3272 u16 len)
3273 {
3274 struct mgmt_cp_disconnect *cp = data;
3275 struct mgmt_rp_disconnect rp;
3276 struct mgmt_pending_cmd *cmd;
3277 int err;
3278
3279 bt_dev_dbg(hdev, "sock %p", sk);
3280
3281 memset(&rp, 0, sizeof(rp));
3282 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3283 rp.addr.type = cp->addr.type;
3284
3285 if (!bdaddr_type_is_valid(cp->addr.type))
3286 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3287 MGMT_STATUS_INVALID_PARAMS,
3288 &rp, sizeof(rp));
3289
3290 hci_dev_lock(hdev);
3291
3292 if (!test_bit(HCI_UP, &hdev->flags)) {
3293 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3294 MGMT_STATUS_NOT_POWERED, &rp,
3295 sizeof(rp));
3296 goto failed;
3297 }
3298
3299 cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3300 if (!cmd) {
3301 err = -ENOMEM;
3302 goto failed;
3303 }
3304
3305 cmd->cmd_complete = generic_cmd_complete;
3306
3307 err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
3308 disconnect_complete);
3309 if (err < 0)
3310 mgmt_pending_free(cmd);
3311
3312 failed:
3313 hci_dev_unlock(hdev);
3314 return err;
3315 }
3316
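/* Map an HCI link type and address type to the corresponding mgmt
 * BDADDR_* address type, falling back to LE Random for LE links and to
 * BR/EDR otherwise.
 */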
3317 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3318 {
3319 switch (link_type) {
3320 case CIS_LINK:
3321 case BIS_LINK:
3322 case PA_LINK:
3323 case LE_LINK:
3324 switch (addr_type) {
3325 case ADDR_LE_DEV_PUBLIC:
3326 return BDADDR_LE_PUBLIC;
3327
3328 default:
3329 /* Fallback to LE Random address type */
3330 return BDADDR_LE_RANDOM;
3331 }
3332
3333 default:
3334 /* Fallback to BR/EDR type */
3335 return BDADDR_BREDR;
3336 }
3337 }
3338
3339 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3340 u16 data_len)
3341 {
3342 struct mgmt_rp_get_connections *rp;
3343 struct hci_conn *c;
3344 int err;
3345 u16 i;
3346
3347 bt_dev_dbg(hdev, "sock %p", sk);
3348
3349 hci_dev_lock(hdev);
3350
3351 if (!hdev_is_powered(hdev)) {
3352 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3353 MGMT_STATUS_NOT_POWERED);
3354 goto unlock;
3355 }
3356
3357 i = 0;
3358 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3359 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3360 i++;
3361 }
3362
3363 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3364 if (!rp) {
3365 err = -ENOMEM;
3366 goto unlock;
3367 }
3368
3369 i = 0;
3370 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3371 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3372 continue;
3373 bacpy(&rp->addr[i].bdaddr, &c->dst);
3374 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3375 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3376 continue;
3377 i++;
3378 }
3379
3380 rp->conn_count = cpu_to_le16(i);
3381
3382 /* Recalculate length in case of filtered SCO connections, etc */
3383 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3384 struct_size(rp, addr, i));
3385
3386 kfree(rp);
3387
3388 unlock:
3389 hci_dev_unlock(hdev);
3390 return err;
3391 }
3392
3393 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3394 struct mgmt_cp_pin_code_neg_reply *cp)
3395 {
3396 struct mgmt_pending_cmd *cmd;
3397 int err;
3398
3399 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3400 sizeof(*cp));
3401 if (!cmd)
3402 return -ENOMEM;
3403
3404 cmd->cmd_complete = addr_cmd_complete;
3405
3406 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3407 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3408 if (err < 0)
3409 mgmt_pending_remove(cmd);
3410
3411 return err;
3412 }
3413
3414 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3415 u16 len)
3416 {
3417 struct hci_conn *conn;
3418 struct mgmt_cp_pin_code_reply *cp = data;
3419 struct hci_cp_pin_code_reply reply;
3420 struct mgmt_pending_cmd *cmd;
3421 int err;
3422
3423 bt_dev_dbg(hdev, "sock %p", sk);
3424
3425 hci_dev_lock(hdev);
3426
3427 if (!hdev_is_powered(hdev)) {
3428 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3429 MGMT_STATUS_NOT_POWERED);
3430 goto failed;
3431 }
3432
3433 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3434 if (!conn) {
3435 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3436 MGMT_STATUS_NOT_CONNECTED);
3437 goto failed;
3438 }
3439
3440 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3441 struct mgmt_cp_pin_code_neg_reply ncp;
3442
3443 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3444
3445 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3446
3447 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3448 if (err >= 0)
3449 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3450 MGMT_STATUS_INVALID_PARAMS);
3451
3452 goto failed;
3453 }
3454
3455 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3456 if (!cmd) {
3457 err = -ENOMEM;
3458 goto failed;
3459 }
3460
3461 cmd->cmd_complete = addr_cmd_complete;
3462
3463 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3464 reply.pin_len = cp->pin_len;
3465 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3466
3467 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3468 if (err < 0)
3469 mgmt_pending_remove(cmd);
3470
3471 failed:
3472 hci_dev_unlock(hdev);
3473 return err;
3474 }
3475
3476 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3477 u16 len)
3478 {
3479 struct mgmt_cp_set_io_capability *cp = data;
3480
3481 bt_dev_dbg(hdev, "sock %p", sk);
3482
3483 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3484 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3485 MGMT_STATUS_INVALID_PARAMS);
3486
3487 hci_dev_lock(hdev);
3488
3489 hdev->io_capability = cp->io_capability;
3490
3491 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3492
3493 hci_dev_unlock(hdev);
3494
3495 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3496 NULL, 0);
3497 }
3498
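/* Find the pending Pair Device command, if any, whose user_data refers
 * to this connection.
 */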
3499 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3500 {
3501 struct hci_dev *hdev = conn->hdev;
3502 struct mgmt_pending_cmd *cmd;
3503
3504 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3505 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3506 continue;
3507
3508 if (cmd->user_data != conn)
3509 continue;
3510
3511 return cmd;
3512 }
3513
3514 return NULL;
3515 }
3516
3517 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3518 {
3519 struct mgmt_rp_pair_device rp;
3520 struct hci_conn *conn = cmd->user_data;
3521 int err;
3522
3523 bacpy(&rp.addr.bdaddr, &conn->dst);
3524 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3525
3526 err = mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_PAIR_DEVICE,
3527 status, &rp, sizeof(rp));
3528
3529 /* So we don't get further callbacks for this connection */
3530 conn->connect_cfm_cb = NULL;
3531 conn->security_cfm_cb = NULL;
3532 conn->disconn_cfm_cb = NULL;
3533
3534 hci_conn_drop(conn);
3535
3536 /* The device is paired so there is no need to remove
3537 * its connection parameters anymore.
3538 */
3539 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3540
3541 hci_conn_put(conn);
3542
3543 return err;
3544 }
3545
3546 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3547 {
3548 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3549 struct mgmt_pending_cmd *cmd;
3550
3551 cmd = find_pairing(conn);
3552 if (cmd) {
3553 cmd->cmd_complete(cmd, status);
3554 mgmt_pending_remove(cmd);
3555 }
3556 }
3557
3558 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3559 {
3560 struct mgmt_pending_cmd *cmd;
3561
3562 BT_DBG("status %u", status);
3563
3564 cmd = find_pairing(conn);
3565 if (!cmd) {
3566 BT_DBG("Unable to find a pending command");
3567 return;
3568 }
3569
3570 cmd->cmd_complete(cmd, mgmt_status(status));
3571 mgmt_pending_remove(cmd);
3572 }
3573
3574 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3575 {
3576 struct mgmt_pending_cmd *cmd;
3577
3578 BT_DBG("status %u", status);
3579
3580 if (!status)
3581 return;
3582
3583 cmd = find_pairing(conn);
3584 if (!cmd) {
3585 BT_DBG("Unable to find a pending command");
3586 return;
3587 }
3588
3589 cmd->cmd_complete(cmd, mgmt_status(status));
3590 mgmt_pending_remove(cmd);
3591 }
3592
3593 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3594 u16 len)
3595 {
3596 struct mgmt_cp_pair_device *cp = data;
3597 struct mgmt_rp_pair_device rp;
3598 struct mgmt_pending_cmd *cmd;
3599 u8 sec_level, auth_type;
3600 struct hci_conn *conn;
3601 int err;
3602
3603 bt_dev_dbg(hdev, "sock %p", sk);
3604
3605 memset(&rp, 0, sizeof(rp));
3606 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3607 rp.addr.type = cp->addr.type;
3608
3609 if (!bdaddr_type_is_valid(cp->addr.type))
3610 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3611 MGMT_STATUS_INVALID_PARAMS,
3612 &rp, sizeof(rp));
3613
3614 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3615 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3616 MGMT_STATUS_INVALID_PARAMS,
3617 &rp, sizeof(rp));
3618
3619 hci_dev_lock(hdev);
3620
3621 if (!hdev_is_powered(hdev)) {
3622 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3623 MGMT_STATUS_NOT_POWERED, &rp,
3624 sizeof(rp));
3625 goto unlock;
3626 }
3627
3628 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3629 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3630 MGMT_STATUS_ALREADY_PAIRED, &rp,
3631 sizeof(rp));
3632 goto unlock;
3633 }
3634
3635 sec_level = BT_SECURITY_MEDIUM;
3636 auth_type = HCI_AT_DEDICATED_BONDING;
3637
3638 if (cp->addr.type == BDADDR_BREDR) {
3639 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3640 auth_type, CONN_REASON_PAIR_DEVICE,
3641 HCI_ACL_CONN_TIMEOUT);
3642 } else {
3643 u8 addr_type = le_addr_type(cp->addr.type);
3644 struct hci_conn_params *p;
3645
3646 /* When pairing a new device, it is expected to remember
3647 * this device for future connections. Adding the connection
3648 * parameter information ahead of time allows tracking
3649 * of the peripheral preferred values and will speed up any
3650 * further connection establishment.
3651 *
3652 * If connection parameters already exist, then they
3653 * will be kept and this function does nothing.
3654 */
3655 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3656 if (!p) {
3657 err = -EIO;
3658 goto unlock;
3659 }
3660
3661 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3662 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3663
3664 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3665 sec_level, HCI_LE_CONN_TIMEOUT,
3666 CONN_REASON_PAIR_DEVICE);
3667 }
3668
3669 if (IS_ERR(conn)) {
3670 int status;
3671
3672 if (PTR_ERR(conn) == -EBUSY)
3673 status = MGMT_STATUS_BUSY;
3674 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3675 status = MGMT_STATUS_NOT_SUPPORTED;
3676 else if (PTR_ERR(conn) == -ECONNREFUSED)
3677 status = MGMT_STATUS_REJECTED;
3678 else
3679 status = MGMT_STATUS_CONNECT_FAILED;
3680
3681 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3682 status, &rp, sizeof(rp));
3683 goto unlock;
3684 }
3685
3686 if (conn->connect_cfm_cb) {
3687 hci_conn_drop(conn);
3688 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3689 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3690 goto unlock;
3691 }
3692
3693 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3694 if (!cmd) {
3695 err = -ENOMEM;
3696 hci_conn_drop(conn);
3697 goto unlock;
3698 }
3699
3700 cmd->cmd_complete = pairing_complete;
3701
3702 /* For LE, just connecting isn't a proof that the pairing finished */
3703 if (cp->addr.type == BDADDR_BREDR) {
3704 conn->connect_cfm_cb = pairing_complete_cb;
3705 conn->security_cfm_cb = pairing_complete_cb;
3706 conn->disconn_cfm_cb = pairing_complete_cb;
3707 } else {
3708 conn->connect_cfm_cb = le_pairing_complete_cb;
3709 conn->security_cfm_cb = le_pairing_complete_cb;
3710 conn->disconn_cfm_cb = le_pairing_complete_cb;
3711 }
3712
3713 conn->io_capability = cp->io_cap;
3714 cmd->user_data = hci_conn_get(conn);
3715
3716 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3717 hci_conn_security(conn, sec_level, auth_type, true)) {
3718 cmd->cmd_complete(cmd, 0);
3719 mgmt_pending_remove(cmd);
3720 }
3721
3722 err = 0;
3723
3724 unlock:
3725 hci_dev_unlock(hdev);
3726 return err;
3727 }
3728
3729 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3730 u16 len)
3731 {
3732 struct mgmt_addr_info *addr = data;
3733 struct mgmt_pending_cmd *cmd;
3734 struct hci_conn *conn;
3735 int err;
3736
3737 bt_dev_dbg(hdev, "sock %p", sk);
3738
3739 hci_dev_lock(hdev);
3740
3741 if (!hdev_is_powered(hdev)) {
3742 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3743 MGMT_STATUS_NOT_POWERED);
3744 goto unlock;
3745 }
3746
3747 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3748 if (!cmd) {
3749 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3750 MGMT_STATUS_INVALID_PARAMS);
3751 goto unlock;
3752 }
3753
3754 conn = cmd->user_data;
3755
3756 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3757 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3758 MGMT_STATUS_INVALID_PARAMS);
3759 goto unlock;
3760 }
3761
3762 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3763 mgmt_pending_remove(cmd);
3764
3765 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3766 addr, sizeof(*addr));
3767
3768 /* Since the user doesn't want to proceed with the connection, abort any
3769 * ongoing pairing and then terminate the link if it was created
3770 * because of the pair device action.
3771 */
3772 if (addr->type == BDADDR_BREDR)
3773 hci_remove_link_key(hdev, &addr->bdaddr);
3774 else
3775 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3776 le_addr_type(addr->type));
3777
3778 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3779 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3780
3781 unlock:
3782 hci_dev_unlock(hdev);
3783 return err;
3784 }
3785
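/* Common handler for the user confirmation/passkey replies: LE responses
 * are handed to SMP directly, while BR/EDR responses are sent as the
 * matching HCI command with a pending mgmt command tracking completion.
 */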
3786 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3787 struct mgmt_addr_info *addr, u16 mgmt_op,
3788 u16 hci_op, __le32 passkey)
3789 {
3790 struct mgmt_pending_cmd *cmd;
3791 struct hci_conn *conn;
3792 int err;
3793
3794 hci_dev_lock(hdev);
3795
3796 if (!hdev_is_powered(hdev)) {
3797 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3798 MGMT_STATUS_NOT_POWERED, addr,
3799 sizeof(*addr));
3800 goto done;
3801 }
3802
3803 if (addr->type == BDADDR_BREDR)
3804 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3805 else
3806 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3807 le_addr_type(addr->type));
3808
3809 if (!conn) {
3810 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3811 MGMT_STATUS_NOT_CONNECTED, addr,
3812 sizeof(*addr));
3813 goto done;
3814 }
3815
3816 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3817 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3818 if (!err)
3819 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3820 MGMT_STATUS_SUCCESS, addr,
3821 sizeof(*addr));
3822 else
3823 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3824 MGMT_STATUS_FAILED, addr,
3825 sizeof(*addr));
3826
3827 goto done;
3828 }
3829
3830 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3831 if (!cmd) {
3832 err = -ENOMEM;
3833 goto done;
3834 }
3835
3836 cmd->cmd_complete = addr_cmd_complete;
3837
3838 /* Continue with pairing via HCI */
3839 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3840 struct hci_cp_user_passkey_reply cp;
3841
3842 bacpy(&cp.bdaddr, &addr->bdaddr);
3843 cp.passkey = passkey;
3844 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3845 } else
3846 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3847 &addr->bdaddr);
3848
3849 if (err < 0)
3850 mgmt_pending_remove(cmd);
3851
3852 done:
3853 hci_dev_unlock(hdev);
3854 return err;
3855 }
3856
pin_code_neg_reply(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)3857 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3858 void *data, u16 len)
3859 {
3860 struct mgmt_cp_pin_code_neg_reply *cp = data;
3861
3862 bt_dev_dbg(hdev, "sock %p", sk);
3863
3864 return user_pairing_resp(sk, hdev, &cp->addr,
3865 MGMT_OP_PIN_CODE_NEG_REPLY,
3866 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3867 }
3868
user_confirm_reply(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)3869 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3870 u16 len)
3871 {
3872 struct mgmt_cp_user_confirm_reply *cp = data;
3873
3874 bt_dev_dbg(hdev, "sock %p", sk);
3875
3876 if (len != sizeof(*cp))
3877 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3878 MGMT_STATUS_INVALID_PARAMS);
3879
3880 return user_pairing_resp(sk, hdev, &cp->addr,
3881 MGMT_OP_USER_CONFIRM_REPLY,
3882 HCI_OP_USER_CONFIRM_REPLY, 0);
3883 }
3884
user_confirm_neg_reply(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)3885 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3886 void *data, u16 len)
3887 {
3888 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3889
3890 bt_dev_dbg(hdev, "sock %p", sk);
3891
3892 return user_pairing_resp(sk, hdev, &cp->addr,
3893 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3894 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3895 }
3896
user_passkey_reply(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)3897 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3898 u16 len)
3899 {
3900 struct mgmt_cp_user_passkey_reply *cp = data;
3901
3902 bt_dev_dbg(hdev, "sock %p", sk);
3903
3904 return user_pairing_resp(sk, hdev, &cp->addr,
3905 MGMT_OP_USER_PASSKEY_REPLY,
3906 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3907 }
3908
user_passkey_neg_reply(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)3909 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3910 void *data, u16 len)
3911 {
3912 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3913
3914 bt_dev_dbg(hdev, "sock %p", sk);
3915
3916 return user_pairing_resp(sk, hdev, &cp->addr,
3917 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3918 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3919 }
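
/* Illustrative sketch (compiled out): how a management client could drive
 * the user_*_reply() handlers above. It reuses the mgmt.h wire structures
 * purely for illustration; the mgmt_fd socket and its setup are assumed to
 * exist elsewhere and are not part of this file.
 */
#if 0
static void example_user_confirm_accept(int mgmt_fd, u16 index,
					const bdaddr_t *peer, u8 addr_type)
{
	struct {
		struct mgmt_hdr hdr;
		struct mgmt_cp_user_confirm_reply cp;
	} __packed pkt;

	pkt.hdr.opcode = cpu_to_le16(MGMT_OP_USER_CONFIRM_REPLY);
	pkt.hdr.index = cpu_to_le16(index);
	pkt.hdr.len = cpu_to_le16(sizeof(pkt.cp));
	bacpy(&pkt.cp.addr.bdaddr, peer);
	pkt.cp.addr.type = addr_type;	/* BDADDR_BREDR or BDADDR_LE_* */

	/* write(mgmt_fd, &pkt, sizeof(pkt)) would be dispatched to
	 * user_confirm_reply() above, which validates the length and
	 * forwards the reply to the controller or to SMP.
	 */
}
#endif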

static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return 0;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return 0;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return 0;

	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);

	return 0;
}

static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}

static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_free(cmd);
}

static int set_name_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name cp;

	mutex_lock(&hdev->mgmt_pending_lock);

	if (!__mgmt_pending_listed(hdev, cmd)) {
		mutex_unlock(&hdev->mgmt_pending_lock);
		return -ECANCELED;
	}

	memcpy(&cp, cmd->param, sizeof(cp));

	mutex_unlock(&hdev->mgmt_pending_lock);

	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev, cp.name);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}

static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}

static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != appearance) {
		hdev->appearance = appearance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}

static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_configuration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}

int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}

static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb;
	u8 status = mgmt_status(err);

	skb = cmd->skb;

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}

static int set_default_phy_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
	struct hci_cp_le_set_default_phy cp_phy;
	u32 selected_phys;

	selected_phys = __le32_to_cpu(cp->selected_phys);

	memset(&cp_phy, 0, sizeof(cp_phy));

	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
				  sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);

	return 0;
}
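
/* Worked example for the mapping above: selected_phys = MGMT_PHY_LE_1M_TX |
 * MGMT_PHY_LE_1M_RX | MGMT_PHY_LE_2M_RX yields all_phys = 0x00 (both
 * directions explicitly constrained), tx_phys = HCI_LE_SET_PHY_1M and
 * rx_phys = HCI_LE_SET_PHY_1M | HCI_LE_SET_PHY_2M. When no TX (or RX) bit
 * is selected at all, the corresponding all_phys bit tells the controller
 * the host has no preference for that direction.
 */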

static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);
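
	/* Unlike the basic-rate bits above, the EDR bits in pkt_type are
	 * "shall not use" flags, so the logic below is inverted: selecting
	 * an EDR PHY means clearing the corresponding HCI_2DHx/HCI_3DHx bit.
	 */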
	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				 err, NULL, 0);
}
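
/* Length-check example for set_blocked_keys() (illustrative arithmetic):
 * struct_size(keys, keys, key_count) expands to sizeof(*keys) +
 * key_count * sizeof(keys->keys[0]) with overflow checking. With the
 * 2-byte key_count header and two 17-byte entries (a 1-byte type plus a
 * 16-byte value), a well-formed request must carry len == 2 + 2 * 17 == 36
 * bytes; any other length is rejected before the keys are parsed.
 */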

static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_test_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);
	/* Append the min/max LE tx power parameters if we were able to fetch
	 * them from the controller.
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
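
/* The experimental-feature UUIDs below are stored in little-endian byte
 * order, i.e. byte-reversed relative to the canonical string form quoted
 * in each comment, matching the encoding used on the management wire.
 */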

#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};

static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_exp_features_info *rp;
	size_t len;
	u16 idx = 0;
	u32 flags;
	int status;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enough space for 7 features */
	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
	rp = kzalloc(len, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

#ifdef CONFIG_BT_FEATURE_DEBUG
	flags = bt_dbg_get() ? BIT(0) : 0;

	memcpy(rp->features[idx].uuid, debug_uuid, 16);
	rp->features[idx].flags = cpu_to_le32(flags);
	idx++;
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (IS_ENABLED(CONFIG_BT_LE)) {
		flags = iso_inited() ? BIT(0) : 0;
		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && lmp_le_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
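
	/* Each returned feature entry is 20 bytes: a 16-byte UUID followed
	 * by a 32-bit little-endian flags word, which is why the reply
	 * below is sized as sizeof(*rp) + 20 * idx.
	 */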
	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				   MGMT_OP_READ_EXP_FEATURES_INFO,
				   0, rp, sizeof(*rp) + (20 * idx));

	kfree(rp);
	return status;
}

static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
			       bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, uuid, 16);
	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}

#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}

/* The zero key uuid is special. Multiple exp features are set through it. */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}

#ifdef CONFIG_BT_FEATURE_DEBUG
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif

static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_MESH_EXPERIMENTAL);
	} else {
		hci_dev_clear_flag(hdev, HCI_MESH);
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_MESH_EXPERIMENTAL);
	}

	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);

	return err;
}

static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}

static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	if (!hdev->get_data_path_id) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);

	return err;
}

static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));

	if (!hci_dev_le_state_simultaneous(hdev)) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);

	return err;
}

#ifdef CONFIG_BT_LE
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed = false;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	if (val)
		err = iso_init();
	else
		err = iso_exit();

	if (!err)
		changed = true;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, val, sk);

	return err;
}
#endif

static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};

static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	size_t i = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	for (i = 0; exp_features[i].uuid; i++) {
		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
			return exp_features[i].set_func(sk, hdev, cp, data_len);
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}

static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	supported_flags = hdev->conn_flags;

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (!params)
			goto done;

		current_flags = params->flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				 &rp, sizeof(rp));
}

static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = bdaddr_type;
	ev.supported_flags = cpu_to_le32(supported_flags);
	ev.current_flags = cpu_to_le32(current_flags);

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}

static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	if (!conn)
		return false;

	if (conn->dst_type != type)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}

/* This function requires the caller holds hdev->lock */
static struct hci_conn_params *hci_conn_params_set(struct hci_dev *hdev,
						   bdaddr_t *addr, u8 addr_type,
						   u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return NULL;

	if (params->auto_connect == auto_connect)
		return params;

	hci_pend_le_list_del_init(params);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		else
			hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return params;
}
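
/* set_device_flags() below relies on this helper to lazily create a
 * hci_conn_params entry (with auto-connect disabled) for LE devices
 * that have no stored parameters yet.
 */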

static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* conn_flags is read here before hci_dev_lock() is taken and may
	 * change concurrently; it is re-read under the lock below before
	 * the new flags are actually stored.
	 */
	supported_flags = hdev->conn_flags;

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		/* Create a new hci_conn_params if it doesn't exist */
		params = hci_conn_params_set(hdev, &cp->addr.bdaddr,
					     le_addr_type(cp->addr.type),
					     HCI_AUTO_CONN_DISABLED);
		if (!params) {
			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
				    &cp->addr.bdaddr,
				    le_addr_type(cp->addr.type));
			goto unlock;
		}
	}

	supported_flags = hdev->conn_flags;

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	WRITE_ONCE(params->flags, current_flags);
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}

static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
				   u16 handle)
{
	struct mgmt_ev_adv_monitor_added ev;

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
}

static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
				     __le16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;

	ev.monitor_handle = handle;

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
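
	/* Collect every registered handle. This assumes monitor
	 * registration never allocates more than
	 * HCI_MAX_ADV_MONITOR_NUM_HANDLES IDR entries, so the on-stack
	 * handles[] array cannot overflow in this loop.
	 */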
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}

static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor;

	/* This is likely the result of hdev being closed and mgmt_index_removed
	 * is attempting to clean up any pending command so
	 * hci_adv_monitors_clear is about to be called which will take care of
	 * freeing the adv_monitor instances.
	 */
	if (status == -ECANCELED && !mgmt_pending_valid(hdev, cmd))
		return;

	monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}

static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *mon;

	mutex_lock(&hdev->mgmt_pending_lock);

	if (!__mgmt_pending_listed(hdev, cmd)) {
		mutex_unlock(&hdev->mgmt_pending_lock);
		return -ECANCELED;
	}

	mon = cmd->user_data;

	mutex_unlock(&hdev->mgmt_pending_lock);

	return hci_add_adv_monitor(hdev, mon);
}

static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	if (status)
		goto unlock;

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}

static void parse_adv_monitor_rssi(struct adv_monitor *m,
				   struct mgmt_adv_rssi_thresholds *rssi)
{
	if (rssi) {
		m->rssi.low_threshold = rssi->low_threshold;
		m->rssi.low_threshold_timeout =
			__le16_to_cpu(rssi->low_threshold_timeout);
		m->rssi.high_threshold = rssi->high_threshold;
		m->rssi.high_threshold_timeout =
			__le16_to_cpu(rssi->high_threshold_timeout);
		m->rssi.sampling_period = rssi->sampling_period;
	} else {
		/* Default values. These are the least constraining
		 * parameters that keep the MSFT API working, so the monitor
		 * behaves as if there were no RSSI parameters to consider.
		 * They may need to change if other APIs are to be supported.
		 */
		m->rssi.low_threshold = -127;
		m->rssi.low_threshold_timeout = 60;
		m->rssi.high_threshold = -127;
		m->rssi.high_threshold_timeout = 0;
		m->rssi.sampling_period = 0;
	}
}

static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
				    struct mgmt_adv_pattern *patterns)
{
	u8 offset = 0, length = 0;
	struct adv_pattern *p = NULL;
	int i;

	for (i = 0; i < pattern_count; i++) {
		offset = patterns[i].offset;
		length = patterns[i].length;
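		/* Each pattern must lie entirely within the 31-byte legacy
		 * advertising payload (HCI_MAX_AD_LENGTH): both the offset
		 * into the AD data and offset + length are bounds-checked.
		 */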
		if (offset >= HCI_MAX_AD_LENGTH ||
		    length > HCI_MAX_AD_LENGTH ||
		    (offset + length) > HCI_MAX_AD_LENGTH)
			return MGMT_STATUS_INVALID_PARAMS;

		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return MGMT_STATUS_NO_RESOURCES;

		p->ad_type = patterns[i].ad_type;
		p->offset = patterns[i].offset;
		p->length = patterns[i].length;
		memcpy(p->value, patterns[i].value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	return MGMT_STATUS_SUCCESS;
}

static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}
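
/* Illustrative request: to monitor advertisements whose Service Data
 * (AD type 0x16) starts with the 16-bit UUID 0xFE2C, userspace would
 * submit a single mgmt_adv_pattern with ad_type = 0x16, offset = 0,
 * length = 2 and value = { 0x2c, 0xfe } (little-endian UUID bytes).
 */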

static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}

static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp;

	if (status == -ECANCELED)
		return;

	hci_dev_lock(hdev);

	cp = cmd->param;

	rp.monitor_handle = cp->monitor_handle;

	if (!status) {
		mgmt_adv_monitor_removed(cmd->sk, hdev, cp->monitor_handle);
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}

static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
	u16 handle = __le16_to_cpu(cp->monitor_handle);
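
	/* Handle 0 acts as a wildcard: it removes every registered monitor
	 * rather than a single one.
	 */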
5619 if (!handle)
5620 return hci_remove_all_adv_monitor(hdev);
5621
5622 return hci_remove_single_adv_monitor(hdev, handle);
5623 }
5624
remove_adv_monitor(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)5625 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5626 void *data, u16 len)
5627 {
5628 struct mgmt_pending_cmd *cmd;
5629 int err, status;
5630
5631 hci_dev_lock(hdev);
5632
5633 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5634 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5635 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5636 status = MGMT_STATUS_BUSY;
5637 goto unlock;
5638 }
5639
5640 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5641 if (!cmd) {
5642 status = MGMT_STATUS_NO_RESOURCES;
5643 goto unlock;
5644 }
5645
5646 err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
5647 mgmt_remove_adv_monitor_complete);
5648
5649 if (err) {
5650 mgmt_pending_free(cmd);
5651
5652 if (err == -ENOMEM)
5653 status = MGMT_STATUS_NO_RESOURCES;
5654 else
5655 status = MGMT_STATUS_FAILED;
5656
5657 goto unlock;
5658 }
5659
5660 hci_dev_unlock(hdev);
5661
5662 return 0;
5663
5664 unlock:
5665 hci_dev_unlock(hdev);
5666 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5667 status);
5668 }
5669
5670 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data,
5671 int err)
5672 {
5673 struct mgmt_rp_read_local_oob_data mgmt_rp;
5674 size_t rp_size = sizeof(mgmt_rp);
5675 struct mgmt_pending_cmd *cmd = data;
5676 struct sk_buff *skb = cmd->skb;
5677 u8 status = mgmt_status(err);
5678
5679 if (!status) {
5680 if (!skb)
5681 status = MGMT_STATUS_FAILED;
5682 else if (IS_ERR(skb))
5683 status = mgmt_status(PTR_ERR(skb));
5684 else
5685 status = mgmt_status(skb->data[0]);
5686 }
5687
5688 bt_dev_dbg(hdev, "status %d", status);
5689
5690 if (status) {
5691 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5692 status);
5693 goto remove;
5694 }
5695
5696 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5697
5698 if (!bredr_sc_enabled(hdev)) {
5699 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5700
5701 if (skb->len < sizeof(*rp)) {
5702 mgmt_cmd_status(cmd->sk, hdev->id,
5703 MGMT_OP_READ_LOCAL_OOB_DATA,
5704 MGMT_STATUS_FAILED);
5705 goto remove;
5706 }
5707
5708 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5709 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5710
5711 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5712 } else {
5713 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5714
5715 if (skb->len < sizeof(*rp)) {
5716 mgmt_cmd_status(cmd->sk, hdev->id,
5717 MGMT_OP_READ_LOCAL_OOB_DATA,
5718 MGMT_STATUS_FAILED);
5719 goto remove;
5720 }
5721
5722 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5723 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5724
5725 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5726 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5727 }
5728
5729 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5730 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5731
5732 remove:
5733 if (skb && !IS_ERR(skb))
5734 kfree_skb(skb);
5735
5736 mgmt_pending_free(cmd);
5737 }
5738
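/* Issues the HCI Read Local OOB Data command, requesting the extended
 * (P-192 plus P-256) variant when BR/EDR Secure Connections is enabled.
 */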
5739 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5740 {
5741 struct mgmt_pending_cmd *cmd = data;
5742
5743 if (bredr_sc_enabled(hdev))
5744 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5745 else
5746 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5747
5748 if (IS_ERR(cmd->skb))
5749 return PTR_ERR(cmd->skb);
5750 else
5751 return 0;
5752 }
5753
5754 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5755 void *data, u16 data_len)
5756 {
5757 struct mgmt_pending_cmd *cmd;
5758 int err;
5759
5760 bt_dev_dbg(hdev, "sock %p", sk);
5761
5762 hci_dev_lock(hdev);
5763
5764 if (!hdev_is_powered(hdev)) {
5765 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5766 MGMT_STATUS_NOT_POWERED);
5767 goto unlock;
5768 }
5769
5770 if (!lmp_ssp_capable(hdev)) {
5771 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5772 MGMT_STATUS_NOT_SUPPORTED);
5773 goto unlock;
5774 }
5775
5776 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5777 if (!cmd)
5778 err = -ENOMEM;
5779 else
5780 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5781 read_local_oob_data_complete);
5782
5783 if (err < 0) {
5784 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5785 MGMT_STATUS_FAILED);
5786
5787 if (cmd)
5788 mgmt_pending_free(cmd);
5789 }
5790
5791 unlock:
5792 hci_dev_unlock(hdev);
5793 return err;
5794 }
5795
5796 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5797 void *data, u16 len)
5798 {
5799 struct mgmt_addr_info *addr = data;
5800 int err;
5801
5802 bt_dev_dbg(hdev, "sock %p", sk);
5803
5804 if (!bdaddr_type_is_valid(addr->type))
5805 return mgmt_cmd_complete(sk, hdev->id,
5806 MGMT_OP_ADD_REMOTE_OOB_DATA,
5807 MGMT_STATUS_INVALID_PARAMS,
5808 addr, sizeof(*addr));
5809
5810 hci_dev_lock(hdev);
5811
5812 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5813 struct mgmt_cp_add_remote_oob_data *cp = data;
5814 u8 status;
5815
5816 if (cp->addr.type != BDADDR_BREDR) {
5817 err = mgmt_cmd_complete(sk, hdev->id,
5818 MGMT_OP_ADD_REMOTE_OOB_DATA,
5819 MGMT_STATUS_INVALID_PARAMS,
5820 &cp->addr, sizeof(cp->addr));
5821 goto unlock;
5822 }
5823
5824 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5825 cp->addr.type, cp->hash,
5826 cp->rand, NULL, NULL);
5827 if (err < 0)
5828 status = MGMT_STATUS_FAILED;
5829 else
5830 status = MGMT_STATUS_SUCCESS;
5831
5832 err = mgmt_cmd_complete(sk, hdev->id,
5833 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5834 &cp->addr, sizeof(cp->addr));
5835 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5836 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5837 u8 *rand192, *hash192, *rand256, *hash256;
5838 u8 status;
5839
5840 if (bdaddr_type_is_le(cp->addr.type)) {
5841 /* Enforce zero-valued 192-bit parameters as
5842 * long as legacy SMP OOB isn't implemented.
5843 */
5844 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5845 memcmp(cp->hash192, ZERO_KEY, 16)) {
5846 err = mgmt_cmd_complete(sk, hdev->id,
5847 MGMT_OP_ADD_REMOTE_OOB_DATA,
5848 MGMT_STATUS_INVALID_PARAMS,
5849 addr, sizeof(*addr));
5850 goto unlock;
5851 }
5852
5853 rand192 = NULL;
5854 hash192 = NULL;
5855 } else {
5856 /* In case one of the P-192 values is set to zero,
5857 * then just disable OOB data for P-192.
5858 */
5859 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5860 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5861 rand192 = NULL;
5862 hash192 = NULL;
5863 } else {
5864 rand192 = cp->rand192;
5865 hash192 = cp->hash192;
5866 }
5867 }
5868
5869 /* In case one of the P-256 values is set to zero, then just
5870 * disable OOB data for P-256.
5871 */
5872 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5873 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5874 rand256 = NULL;
5875 hash256 = NULL;
5876 } else {
5877 rand256 = cp->rand256;
5878 hash256 = cp->hash256;
5879 }
5880
5881 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5882 cp->addr.type, hash192, rand192,
5883 hash256, rand256);
5884 if (err < 0)
5885 status = MGMT_STATUS_FAILED;
5886 else
5887 status = MGMT_STATUS_SUCCESS;
5888
5889 err = mgmt_cmd_complete(sk, hdev->id,
5890 MGMT_OP_ADD_REMOTE_OOB_DATA,
5891 status, &cp->addr, sizeof(cp->addr));
5892 } else {
5893 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5894 len);
5895 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5896 MGMT_STATUS_INVALID_PARAMS);
5897 }
5898
5899 unlock:
5900 hci_dev_unlock(hdev);
5901 return err;
5902 }
5903
5904 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5905 void *data, u16 len)
5906 {
5907 struct mgmt_cp_remove_remote_oob_data *cp = data;
5908 u8 status;
5909 int err;
5910
5911 bt_dev_dbg(hdev, "sock %p", sk);
5912
5913 if (cp->addr.type != BDADDR_BREDR)
5914 return mgmt_cmd_complete(sk, hdev->id,
5915 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5916 MGMT_STATUS_INVALID_PARAMS,
5917 &cp->addr, sizeof(cp->addr));
5918
5919 hci_dev_lock(hdev);
5920
5921 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5922 hci_remote_oob_data_clear(hdev);
5923 status = MGMT_STATUS_SUCCESS;
5924 goto done;
5925 }
5926
5927 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5928 if (err < 0)
5929 status = MGMT_STATUS_INVALID_PARAMS;
5930 else
5931 status = MGMT_STATUS_SUCCESS;
5932
5933 done:
5934 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5935 status, &cp->addr, sizeof(cp->addr));
5936
5937 hci_dev_unlock(hdev);
5938 return err;
5939 }
5940
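/* Validates the requested discovery type against what the controller
 * supports; on failure *mgmt_status carries the MGMT error to return.
 */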
5941 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5942 uint8_t *mgmt_status)
5943 {
5944 switch (type) {
5945 case DISCOV_TYPE_LE:
5946 *mgmt_status = mgmt_le_support(hdev);
5947 if (*mgmt_status)
5948 return false;
5949 break;
5950 case DISCOV_TYPE_INTERLEAVED:
5951 *mgmt_status = mgmt_le_support(hdev);
5952 if (*mgmt_status)
5953 return false;
5954 fallthrough;
5955 case DISCOV_TYPE_BREDR:
5956 *mgmt_status = mgmt_bredr_support(hdev);
5957 if (*mgmt_status)
5958 return false;
5959 break;
5960 default:
5961 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5962 return false;
5963 }
5964
5965 return true;
5966 }
5967
5968 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5969 {
5970 struct mgmt_pending_cmd *cmd = data;
5971
5972 bt_dev_dbg(hdev, "err %d", err);
5973
5974 if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
5975 return;
5976
5977 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
5978 cmd->param, 1);
5979 mgmt_pending_free(cmd);
5980
5981 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
5982 DISCOVERY_FINDING);
5983 }
5984
5985 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5986 {
5987 if (!mgmt_pending_listed(hdev, data))
5988 return -ECANCELED;
5989
5990 return hci_start_discovery_sync(hdev);
5991 }
5992
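/* Shared implementation behind MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY.
 */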
5993 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5994 u16 op, void *data, u16 len)
5995 {
5996 struct mgmt_cp_start_discovery *cp = data;
5997 struct mgmt_pending_cmd *cmd;
5998 u8 status;
5999 int err;
6000
6001 bt_dev_dbg(hdev, "sock %p", sk);
6002
6003 hci_dev_lock(hdev);
6004
6005 if (!hdev_is_powered(hdev)) {
6006 err = mgmt_cmd_complete(sk, hdev->id, op,
6007 MGMT_STATUS_NOT_POWERED,
6008 &cp->type, sizeof(cp->type));
6009 goto failed;
6010 }
6011
6012 if (hdev->discovery.state != DISCOVERY_STOPPED ||
6013 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
6014 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
6015 &cp->type, sizeof(cp->type));
6016 goto failed;
6017 }
6018
6019 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6020 err = mgmt_cmd_complete(sk, hdev->id, op, status,
6021 &cp->type, sizeof(cp->type));
6022 goto failed;
6023 }
6024
6025 /* Can't start discovery when it is paused */
6026 if (hdev->discovery_paused) {
6027 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
6028 &cp->type, sizeof(cp->type));
6029 goto failed;
6030 }
6031
6032 /* Clear the discovery filter first to free any previously
6033 * allocated memory for the UUID list.
6034 */
6035 hci_discovery_filter_clear(hdev);
6036
6037 hdev->discovery.type = cp->type;
6038 hdev->discovery.report_invalid_rssi = false;
6039 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
6040 hdev->discovery.limited = true;
6041 else
6042 hdev->discovery.limited = false;
6043
6044 cmd = mgmt_pending_add(sk, op, hdev, data, len);
6045 if (!cmd) {
6046 err = -ENOMEM;
6047 goto failed;
6048 }
6049
6050 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6051 start_discovery_complete);
6052 if (err < 0) {
6053 mgmt_pending_remove(cmd);
6054 goto failed;
6055 }
6056
6057 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6058
6059 failed:
6060 hci_dev_unlock(hdev);
6061 return err;
6062 }
6063
6064 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
6065 void *data, u16 len)
6066 {
6067 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
6068 data, len);
6069 }
6070
6071 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
6072 void *data, u16 len)
6073 {
6074 return start_discovery_internal(sk, hdev,
6075 MGMT_OP_START_LIMITED_DISCOVERY,
6076 data, len);
6077 }
6078
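/* Like start_discovery(), but additionally installs a result filter
 * built from the supplied RSSI threshold and UUID list.
 */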
6079 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
6080 void *data, u16 len)
6081 {
6082 struct mgmt_cp_start_service_discovery *cp = data;
6083 struct mgmt_pending_cmd *cmd;
6084 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
6085 u16 uuid_count, expected_len;
6086 u8 status;
6087 int err;
6088
6089 bt_dev_dbg(hdev, "sock %p", sk);
6090
6091 hci_dev_lock(hdev);
6092
6093 if (!hdev_is_powered(hdev)) {
6094 err = mgmt_cmd_complete(sk, hdev->id,
6095 MGMT_OP_START_SERVICE_DISCOVERY,
6096 MGMT_STATUS_NOT_POWERED,
6097 &cp->type, sizeof(cp->type));
6098 goto failed;
6099 }
6100
6101 if (hdev->discovery.state != DISCOVERY_STOPPED ||
6102 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
6103 err = mgmt_cmd_complete(sk, hdev->id,
6104 MGMT_OP_START_SERVICE_DISCOVERY,
6105 MGMT_STATUS_BUSY, &cp->type,
6106 sizeof(cp->type));
6107 goto failed;
6108 }
6109
6110 if (hdev->discovery_paused) {
6111 err = mgmt_cmd_complete(sk, hdev->id,
6112 MGMT_OP_START_SERVICE_DISCOVERY,
6113 MGMT_STATUS_BUSY, &cp->type,
6114 sizeof(cp->type));
6115 goto failed;
6116 }
6117
6118 uuid_count = __le16_to_cpu(cp->uuid_count);
6119 if (uuid_count > max_uuid_count) {
6120 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
6121 uuid_count);
6122 err = mgmt_cmd_complete(sk, hdev->id,
6123 MGMT_OP_START_SERVICE_DISCOVERY,
6124 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6125 sizeof(cp->type));
6126 goto failed;
6127 }
6128
6129 expected_len = sizeof(*cp) + uuid_count * 16;
6130 if (expected_len != len) {
6131 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
6132 expected_len, len);
6133 err = mgmt_cmd_complete(sk, hdev->id,
6134 MGMT_OP_START_SERVICE_DISCOVERY,
6135 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6136 sizeof(cp->type));
6137 goto failed;
6138 }
6139
6140 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6141 err = mgmt_cmd_complete(sk, hdev->id,
6142 MGMT_OP_START_SERVICE_DISCOVERY,
6143 status, &cp->type, sizeof(cp->type));
6144 goto failed;
6145 }
6146
6147 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6148 hdev, data, len);
6149 if (!cmd) {
6150 err = -ENOMEM;
6151 goto failed;
6152 }
6153
6154 /* Clear the discovery filter first to free any previously
6155 * allocated memory for the UUID list.
6156 */
6157 hci_discovery_filter_clear(hdev);
6158
6159 hdev->discovery.result_filtering = true;
6160 hdev->discovery.type = cp->type;
6161 hdev->discovery.rssi = cp->rssi;
6162 hdev->discovery.uuid_count = uuid_count;
6163
6164 if (uuid_count > 0) {
6165 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6166 GFP_KERNEL);
6167 if (!hdev->discovery.uuids) {
6168 err = mgmt_cmd_complete(sk, hdev->id,
6169 MGMT_OP_START_SERVICE_DISCOVERY,
6170 MGMT_STATUS_FAILED,
6171 &cp->type, sizeof(cp->type));
6172 mgmt_pending_remove(cmd);
6173 goto failed;
6174 }
6175 }
6176
6177 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6178 start_discovery_complete);
6179 if (err < 0) {
6180 mgmt_pending_remove(cmd);
6181 goto failed;
6182 }
6183
6184 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6185
6186 failed:
6187 hci_dev_unlock(hdev);
6188 return err;
6189 }
6190
6191 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6192 {
6193 struct mgmt_pending_cmd *cmd = data;
6194
6195 if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
6196 return;
6197
6198 bt_dev_dbg(hdev, "err %d", err);
6199
6200 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
6201 cmd->param, 1);
6202 mgmt_pending_free(cmd);
6203
6204 if (!err)
6205 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6206 }
6207
6208 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6209 {
6210 if (!mgmt_pending_listed(hdev, data))
6211 return -ECANCELED;
6212
6213 return hci_stop_discovery_sync(hdev);
6214 }
6215
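/* Handler for MGMT_OP_STOP_DISCOVERY: rejects the request if no
 * discovery is active or if the given type does not match the
 * running one.
 */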
6216 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6217 u16 len)
6218 {
6219 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6220 struct mgmt_pending_cmd *cmd;
6221 int err;
6222
6223 bt_dev_dbg(hdev, "sock %p", sk);
6224
6225 hci_dev_lock(hdev);
6226
6227 if (!hci_discovery_active(hdev)) {
6228 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6229 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6230 sizeof(mgmt_cp->type));
6231 goto unlock;
6232 }
6233
6234 if (hdev->discovery.type != mgmt_cp->type) {
6235 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6236 MGMT_STATUS_INVALID_PARAMS,
6237 &mgmt_cp->type, sizeof(mgmt_cp->type));
6238 goto unlock;
6239 }
6240
6241 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6242 if (!cmd) {
6243 err = -ENOMEM;
6244 goto unlock;
6245 }
6246
6247 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6248 stop_discovery_complete);
6249 if (err < 0) {
6250 mgmt_pending_remove(cmd);
6251 goto unlock;
6252 }
6253
6254 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6255
6256 unlock:
6257 hci_dev_unlock(hdev);
6258 return err;
6259 }
6260
6261 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6262 u16 len)
6263 {
6264 struct mgmt_cp_confirm_name *cp = data;
6265 struct inquiry_entry *e;
6266 int err;
6267
6268 bt_dev_dbg(hdev, "sock %p", sk);
6269
6270 hci_dev_lock(hdev);
6271
6272 if (!hci_discovery_active(hdev)) {
6273 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6274 MGMT_STATUS_FAILED, &cp->addr,
6275 sizeof(cp->addr));
6276 goto failed;
6277 }
6278
6279 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6280 if (!e) {
6281 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6282 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6283 sizeof(cp->addr));
6284 goto failed;
6285 }
6286
6287 if (cp->name_known) {
6288 e->name_state = NAME_KNOWN;
6289 list_del(&e->list);
6290 } else {
6291 e->name_state = NAME_NEEDED;
6292 hci_inquiry_cache_update_resolve(hdev, e);
6293 }
6294
6295 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6296 &cp->addr, sizeof(cp->addr));
6297
6298 failed:
6299 hci_dev_unlock(hdev);
6300 return err;
6301 }
6302
6303 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6304 u16 len)
6305 {
6306 struct mgmt_cp_block_device *cp = data;
6307 u8 status;
6308 int err;
6309
6310 bt_dev_dbg(hdev, "sock %p", sk);
6311
6312 if (!bdaddr_type_is_valid(cp->addr.type))
6313 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6314 MGMT_STATUS_INVALID_PARAMS,
6315 &cp->addr, sizeof(cp->addr));
6316
6317 hci_dev_lock(hdev);
6318
6319 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6320 cp->addr.type);
6321 if (err < 0) {
6322 status = MGMT_STATUS_FAILED;
6323 goto done;
6324 }
6325
6326 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6327 sk);
6328 status = MGMT_STATUS_SUCCESS;
6329
6330 done:
6331 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6332 &cp->addr, sizeof(cp->addr));
6333
6334 hci_dev_unlock(hdev);
6335
6336 return err;
6337 }
6338
6339 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6340 u16 len)
6341 {
6342 struct mgmt_cp_unblock_device *cp = data;
6343 u8 status;
6344 int err;
6345
6346 bt_dev_dbg(hdev, "sock %p", sk);
6347
6348 if (!bdaddr_type_is_valid(cp->addr.type))
6349 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6350 MGMT_STATUS_INVALID_PARAMS,
6351 &cp->addr, sizeof(cp->addr));
6352
6353 hci_dev_lock(hdev);
6354
6355 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6356 cp->addr.type);
6357 if (err < 0) {
6358 status = MGMT_STATUS_INVALID_PARAMS;
6359 goto done;
6360 }
6361
6362 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6363 sk);
6364 status = MGMT_STATUS_SUCCESS;
6365
6366 done:
6367 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6368 &cp->addr, sizeof(cp->addr));
6369
6370 hci_dev_unlock(hdev);
6371
6372 return err;
6373 }
6374
6375 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6376 {
6377 return hci_update_eir_sync(hdev);
6378 }
6379
6380 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6381 u16 len)
6382 {
6383 struct mgmt_cp_set_device_id *cp = data;
6384 int err;
6385 __u16 source;
6386
6387 bt_dev_dbg(hdev, "sock %p", sk);
6388
6389 source = __le16_to_cpu(cp->source);
6390
6391 if (source > 0x0002)
6392 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6393 MGMT_STATUS_INVALID_PARAMS);
6394
6395 hci_dev_lock(hdev);
6396
6397 hdev->devid_source = source;
6398 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6399 hdev->devid_product = __le16_to_cpu(cp->product);
6400 hdev->devid_version = __le16_to_cpu(cp->version);
6401
6402 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6403 NULL, 0);
6404
6405 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6406
6407 hci_dev_unlock(hdev);
6408
6409 return err;
6410 }
6411
6412 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6413 {
6414 if (err)
6415 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6416 else
6417 bt_dev_dbg(hdev, "status %d", err);
6418 }
6419
6420 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6421 {
6422 struct mgmt_pending_cmd *cmd = data;
6423 struct cmd_lookup match = { NULL, hdev };
6424 u8 instance;
6425 struct adv_info *adv_instance;
6426 u8 status = mgmt_status(err);
6427
6428 if (err == -ECANCELED || !mgmt_pending_valid(hdev, data))
6429 return;
6430
6431 if (status) {
6432 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, status);
6433 mgmt_pending_free(cmd);
6434 return;
6435 }
6436
6437 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6438 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6439 else
6440 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6441
6442 settings_rsp(cmd, &match);
6443 mgmt_pending_free(cmd);
6444
6445 new_settings(hdev, match.sk);
6446
6447 if (match.sk)
6448 sock_put(match.sk);
6449
6450 /* If "Set Advertising" was just disabled and instance advertising was
6451 * set up earlier, then re-enable multi-instance advertising.
6452 */
6453 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6454 list_empty(&hdev->adv_instances))
6455 return;
6456
6457 instance = hdev->cur_adv_instance;
6458 if (!instance) {
6459 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6460 struct adv_info, list);
6461 if (!adv_instance)
6462 return;
6463
6464 instance = adv_instance->instance;
6465 }
6466
6467 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6468
6469 enable_advertising_instance(hdev, err);
6470 }
6471
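/* Sync callback for MGMT_OP_SET_ADVERTISING: snapshots the pending
 * command parameters under mgmt_pending_lock and bails out with
 * -ECANCELED if the command is no longer listed.
 */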
6472 static int set_adv_sync(struct hci_dev *hdev, void *data)
6473 {
6474 struct mgmt_pending_cmd *cmd = data;
6475 struct mgmt_mode cp;
6476 u8 val;
6477
6478 mutex_lock(&hdev->mgmt_pending_lock);
6479
6480 if (!__mgmt_pending_listed(hdev, cmd)) {
6481 mutex_unlock(&hdev->mgmt_pending_lock);
6482 return -ECANCELED;
6483 }
6484
6485 memcpy(&cp, cmd->param, sizeof(cp));
6486
6487 mutex_unlock(&hdev->mgmt_pending_lock);
6488
6489 val = !!cp.val;
6490
6491 if (cp.val == 0x02)
6492 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6493 else
6494 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6495
6496 cancel_adv_timeout(hdev);
6497
6498 if (val) {
6499 /* Switch to instance "0" for the Set Advertising setting.
6500 * We cannot use update_[adv|scan_rsp]_data() here as the
6501 * HCI_ADVERTISING flag is not yet set.
6502 */
6503 hdev->cur_adv_instance = 0x00;
6504
6505 if (ext_adv_capable(hdev)) {
6506 hci_start_ext_adv_sync(hdev, 0x00);
6507 } else {
6508 hci_update_adv_data_sync(hdev, 0x00);
6509 hci_update_scan_rsp_data_sync(hdev, 0x00);
6510 hci_enable_advertising_sync(hdev);
6511 }
6512 } else {
6513 hci_disable_advertising_sync(hdev);
6514 }
6515
6516 return 0;
6517 }
6518
6519 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6520 u16 len)
6521 {
6522 struct mgmt_mode *cp = data;
6523 struct mgmt_pending_cmd *cmd;
6524 u8 val, status;
6525 int err;
6526
6527 bt_dev_dbg(hdev, "sock %p", sk);
6528
6529 status = mgmt_le_support(hdev);
6530 if (status)
6531 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6532 status);
6533
6534 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6535 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6536 MGMT_STATUS_INVALID_PARAMS);
6537
6538 if (hdev->advertising_paused)
6539 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6540 MGMT_STATUS_BUSY);
6541
6542 hci_dev_lock(hdev);
6543
6544 val = !!cp->val;
6545
6546 /* The following conditions mean that we should not do any
6547 * HCI communication but instead directly send a mgmt
6548 * response to user space (after toggling the flag if
6549 * necessary).
6550 */
6551 if (!hdev_is_powered(hdev) ||
6552 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6553 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6554 hci_dev_test_flag(hdev, HCI_MESH) ||
6555 hci_conn_num(hdev, LE_LINK) > 0 ||
6556 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6557 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6558 bool changed;
6559
6560 if (cp->val) {
6561 hdev->cur_adv_instance = 0x00;
6562 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6563 if (cp->val == 0x02)
6564 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6565 else
6566 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6567 } else {
6568 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6569 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6570 }
6571
6572 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6573 if (err < 0)
6574 goto unlock;
6575
6576 if (changed)
6577 err = new_settings(hdev, sk);
6578
6579 goto unlock;
6580 }
6581
6582 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6583 pending_find(MGMT_OP_SET_LE, hdev)) {
6584 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6585 MGMT_STATUS_BUSY);
6586 goto unlock;
6587 }
6588
6589 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6590 if (!cmd)
6591 err = -ENOMEM;
6592 else
6593 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6594 set_advertising_complete);
6595
6596 if (err < 0 && cmd)
6597 mgmt_pending_remove(cmd);
6598
6599 unlock:
6600 hci_dev_unlock(hdev);
6601 return err;
6602 }
6603
6604 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6605 void *data, u16 len)
6606 {
6607 struct mgmt_cp_set_static_address *cp = data;
6608 int err;
6609
6610 bt_dev_dbg(hdev, "sock %p", sk);
6611
6612 if (!lmp_le_capable(hdev))
6613 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6614 MGMT_STATUS_NOT_SUPPORTED);
6615
6616 if (hdev_is_powered(hdev))
6617 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6618 MGMT_STATUS_REJECTED);
6619
6620 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6621 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6622 return mgmt_cmd_status(sk, hdev->id,
6623 MGMT_OP_SET_STATIC_ADDRESS,
6624 MGMT_STATUS_INVALID_PARAMS);
6625
6626 /* Two most significant bits shall be set */
6627 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6628 return mgmt_cmd_status(sk, hdev->id,
6629 MGMT_OP_SET_STATIC_ADDRESS,
6630 MGMT_STATUS_INVALID_PARAMS);
6631 }
6632
6633 hci_dev_lock(hdev);
6634
6635 bacpy(&hdev->static_addr, &cp->bdaddr);
6636
6637 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6638 if (err < 0)
6639 goto unlock;
6640
6641 err = new_settings(hdev, sk);
6642
6643 unlock:
6644 hci_dev_unlock(hdev);
6645 return err;
6646 }
6647
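/* Handler for MGMT_OP_SET_SCAN_PARAMS: interval and window must each
 * be in the range 0x0004-0x4000 and the window may not exceed the
 * interval.
 */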
6648 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6649 void *data, u16 len)
6650 {
6651 struct mgmt_cp_set_scan_params *cp = data;
6652 __u16 interval, window;
6653 int err;
6654
6655 bt_dev_dbg(hdev, "sock %p", sk);
6656
6657 if (!lmp_le_capable(hdev))
6658 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6659 MGMT_STATUS_NOT_SUPPORTED);
6660
6661 /* Keep allowed ranges in sync with set_mesh() */
6662 interval = __le16_to_cpu(cp->interval);
6663
6664 if (interval < 0x0004 || interval > 0x4000)
6665 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6666 MGMT_STATUS_INVALID_PARAMS);
6667
6668 window = __le16_to_cpu(cp->window);
6669
6670 if (window < 0x0004 || window > 0x4000)
6671 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6672 MGMT_STATUS_INVALID_PARAMS);
6673
6674 if (window > interval)
6675 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6676 MGMT_STATUS_INVALID_PARAMS);
6677
6678 hci_dev_lock(hdev);
6679
6680 hdev->le_scan_interval = interval;
6681 hdev->le_scan_window = window;
6682
6683 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6684 NULL, 0);
6685
6686 /* If background scan is running, restart it so new parameters are
6687 * loaded.
6688 */
6689 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6690 hdev->discovery.state == DISCOVERY_STOPPED)
6691 hci_update_passive_scan(hdev);
6692
6693 hci_dev_unlock(hdev);
6694
6695 return err;
6696 }
6697
6698 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6699 {
6700 struct mgmt_pending_cmd *cmd = data;
6701
6702 bt_dev_dbg(hdev, "err %d", err);
6703
6704 if (err) {
6705 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6706 mgmt_status(err));
6707 } else {
6708 struct mgmt_mode *cp = cmd->param;
6709
6710 if (cp->val)
6711 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6712 else
6713 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6714
6715 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6716 new_settings(hdev, cmd->sk);
6717 }
6718
6719 mgmt_pending_free(cmd);
6720 }
6721
6722 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6723 {
6724 struct mgmt_pending_cmd *cmd = data;
6725 struct mgmt_mode *cp = cmd->param;
6726
6727 return hci_write_fast_connectable_sync(hdev, cp->val);
6728 }
6729
6730 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6731 void *data, u16 len)
6732 {
6733 struct mgmt_mode *cp = data;
6734 struct mgmt_pending_cmd *cmd;
6735 int err;
6736
6737 bt_dev_dbg(hdev, "sock %p", sk);
6738
6739 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6740 hdev->hci_ver < BLUETOOTH_VER_1_2)
6741 return mgmt_cmd_status(sk, hdev->id,
6742 MGMT_OP_SET_FAST_CONNECTABLE,
6743 MGMT_STATUS_NOT_SUPPORTED);
6744
6745 if (cp->val != 0x00 && cp->val != 0x01)
6746 return mgmt_cmd_status(sk, hdev->id,
6747 MGMT_OP_SET_FAST_CONNECTABLE,
6748 MGMT_STATUS_INVALID_PARAMS);
6749
6750 hci_dev_lock(hdev);
6751
6752 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6753 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6754 goto unlock;
6755 }
6756
6757 if (!hdev_is_powered(hdev)) {
6758 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6759 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6760 new_settings(hdev, sk);
6761 goto unlock;
6762 }
6763
6764 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6765 len);
6766 if (!cmd)
6767 err = -ENOMEM;
6768 else
6769 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6770 fast_connectable_complete);
6771
6772 if (err < 0) {
6773 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6774 MGMT_STATUS_FAILED);
6775
6776 if (cmd)
6777 mgmt_pending_free(cmd);
6778 }
6779
6780 unlock:
6781 hci_dev_unlock(hdev);
6782
6783 return err;
6784 }
6785
6786 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6787 {
6788 struct mgmt_pending_cmd *cmd = data;
6789
6790 bt_dev_dbg(hdev, "err %d", err);
6791
6792 if (err) {
6793 u8 mgmt_err = mgmt_status(err);
6794
6795 /* We need to restore the flag if related HCI commands
6796 * failed.
6797 */
6798 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6799
6800 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
6801 } else {
6802 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6803 new_settings(hdev, cmd->sk);
6804 }
6805
6806 mgmt_pending_free(cmd);
6807 }
6808
6809 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6810 {
6811 int status;
6812
6813 status = hci_write_fast_connectable_sync(hdev, false);
6814
6815 if (!status)
6816 status = hci_update_scan_sync(hdev);
6817
6818 /* Since only the advertising data flags will change, there
6819 * is no need to update the scan response data.
6820 */
6821 if (!status)
6822 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6823
6824 return status;
6825 }
6826
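/* Handler for MGMT_OP_SET_BREDR: toggles BR/EDR support on a
 * dual-mode controller; disabling it while powered on is rejected.
 */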
6827 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6828 {
6829 struct mgmt_mode *cp = data;
6830 struct mgmt_pending_cmd *cmd;
6831 int err;
6832
6833 bt_dev_dbg(hdev, "sock %p", sk);
6834
6835 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6836 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6837 MGMT_STATUS_NOT_SUPPORTED);
6838
6839 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6840 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6841 MGMT_STATUS_REJECTED);
6842
6843 if (cp->val != 0x00 && cp->val != 0x01)
6844 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6845 MGMT_STATUS_INVALID_PARAMS);
6846
6847 hci_dev_lock(hdev);
6848
6849 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6850 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6851 goto unlock;
6852 }
6853
6854 if (!hdev_is_powered(hdev)) {
6855 if (!cp->val) {
6856 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6857 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6858 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6859 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6860 }
6861
6862 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6863
6864 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6865 if (err < 0)
6866 goto unlock;
6867
6868 err = new_settings(hdev, sk);
6869 goto unlock;
6870 }
6871
6872 /* Reject disabling when powered on */
6873 if (!cp->val) {
6874 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6875 MGMT_STATUS_REJECTED);
6876 goto unlock;
6877 } else {
6878 /* When a dual-mode controller has been configured to operate
6879 * with LE only and is using a static address, switching
6880 * BR/EDR back on is not allowed.
6881 *
6882 * Dual-mode controllers shall operate with the public
6883 * address as their identity address for both BR/EDR and LE.
6884 * So reject the attempt to create an invalid configuration.
6885 *
6886 * The same restriction applies when secure connections
6887 * have been enabled. For BR/EDR this is a controller feature
6888 * while for LE it is a host stack feature. This means that
6889 * switching BR/EDR back on when secure connections have been
6890 * enabled is not a supported transaction.
6891 */
6892 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6893 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6894 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6895 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6896 MGMT_STATUS_REJECTED);
6897 goto unlock;
6898 }
6899 }
6900
6901 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6902 if (!cmd)
6903 err = -ENOMEM;
6904 else
6905 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6906 set_bredr_complete);
6907
6908 if (err < 0) {
6909 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6910 MGMT_STATUS_FAILED);
6911 if (cmd)
6912 mgmt_pending_free(cmd);
6913
6914 goto unlock;
6915 }
6916
6917 /* We need to flip the bit already here so that
6918 * hci_req_update_adv_data generates the correct flags.
6919 */
6920 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6921
6922 unlock:
6923 hci_dev_unlock(hdev);
6924 return err;
6925 }
6926
6927 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6928 {
6929 struct mgmt_pending_cmd *cmd = data;
6930 struct mgmt_mode *cp;
6931
6932 bt_dev_dbg(hdev, "err %d", err);
6933
6934 if (err) {
6935 u8 mgmt_err = mgmt_status(err);
6936
6937 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
6938 goto done;
6939 }
6940
6941 cp = cmd->param;
6942
6943 switch (cp->val) {
6944 case 0x00:
6945 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6946 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6947 break;
6948 case 0x01:
6949 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6950 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6951 break;
6952 case 0x02:
6953 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6954 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6955 break;
6956 }
6957
6958 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6959 new_settings(hdev, cmd->sk);
6960
6961 done:
6962 mgmt_pending_free(cmd);
6963 }
6964
6965 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6966 {
6967 struct mgmt_pending_cmd *cmd = data;
6968 struct mgmt_mode *cp = cmd->param;
6969 u8 val = !!cp->val;
6970
6971 /* Force write of val */
6972 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6973
6974 return hci_write_sc_support_sync(hdev, val);
6975 }
6976
6977 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6978 void *data, u16 len)
6979 {
6980 struct mgmt_mode *cp = data;
6981 struct mgmt_pending_cmd *cmd;
6982 u8 val;
6983 int err;
6984
6985 bt_dev_dbg(hdev, "sock %p", sk);
6986
6987 if (!lmp_sc_capable(hdev) &&
6988 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6989 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6990 MGMT_STATUS_NOT_SUPPORTED);
6991
6992 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6993 lmp_sc_capable(hdev) &&
6994 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6995 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6996 MGMT_STATUS_REJECTED);
6997
6998 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6999 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
7000 MGMT_STATUS_INVALID_PARAMS);
7001
7002 hci_dev_lock(hdev);
7003
7004 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
7005 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
7006 bool changed;
7007
7008 if (cp->val) {
7009 changed = !hci_dev_test_and_set_flag(hdev,
7010 HCI_SC_ENABLED);
7011 if (cp->val == 0x02)
7012 hci_dev_set_flag(hdev, HCI_SC_ONLY);
7013 else
7014 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
7015 } else {
7016 changed = hci_dev_test_and_clear_flag(hdev,
7017 HCI_SC_ENABLED);
7018 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
7019 }
7020
7021 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
7022 if (err < 0)
7023 goto failed;
7024
7025 if (changed)
7026 err = new_settings(hdev, sk);
7027
7028 goto failed;
7029 }
7030
7031 val = !!cp->val;
7032
7033 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
7034 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
7035 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
7036 goto failed;
7037 }
7038
7039 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
7040 if (!cmd)
7041 err = -ENOMEM;
7042 else
7043 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
7044 set_secure_conn_complete);
7045
7046 if (err < 0) {
7047 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
7048 MGMT_STATUS_FAILED);
7049 if (cmd)
7050 mgmt_pending_free(cmd);
7051 }
7052
7053 failed:
7054 hci_dev_unlock(hdev);
7055 return err;
7056 }
7057
7058 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
7059 void *data, u16 len)
7060 {
7061 struct mgmt_mode *cp = data;
7062 bool changed, use_changed;
7063 int err;
7064
7065 bt_dev_dbg(hdev, "sock %p", sk);
7066
7067 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
7068 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
7069 MGMT_STATUS_INVALID_PARAMS);
7070
7071 hci_dev_lock(hdev);
7072
7073 if (cp->val)
7074 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
7075 else
7076 changed = hci_dev_test_and_clear_flag(hdev,
7077 HCI_KEEP_DEBUG_KEYS);
7078
7079 if (cp->val == 0x02)
7080 use_changed = !hci_dev_test_and_set_flag(hdev,
7081 HCI_USE_DEBUG_KEYS);
7082 else
7083 use_changed = hci_dev_test_and_clear_flag(hdev,
7084 HCI_USE_DEBUG_KEYS);
7085
7086 if (hdev_is_powered(hdev) && use_changed &&
7087 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7088 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
7089 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
7090 sizeof(mode), &mode);
7091 }
7092
7093 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
7094 if (err < 0)
7095 goto unlock;
7096
7097 if (changed)
7098 err = new_settings(hdev, sk);
7099
7100 unlock:
7101 hci_dev_unlock(hdev);
7102 return err;
7103 }
7104
7105 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7106 u16 len)
7107 {
7108 struct mgmt_cp_set_privacy *cp = cp_data;
7109 bool changed;
7110 int err;
7111
7112 bt_dev_dbg(hdev, "sock %p", sk);
7113
7114 if (!lmp_le_capable(hdev))
7115 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7116 MGMT_STATUS_NOT_SUPPORTED);
7117
7118 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
7119 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7120 MGMT_STATUS_INVALID_PARAMS);
7121
7122 if (hdev_is_powered(hdev))
7123 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7124 MGMT_STATUS_REJECTED);
7125
7126 hci_dev_lock(hdev);
7127
7128 /* If user space supports this command it is also expected to
7129 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
7130 */
7131 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7132
7133 if (cp->privacy) {
7134 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
7135 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
7136 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
7137 hci_adv_instances_set_rpa_expired(hdev, true);
7138 if (cp->privacy == 0x02)
7139 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
7140 else
7141 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7142 } else {
7143 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7144 memset(hdev->irk, 0, sizeof(hdev->irk));
7145 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7146 hci_adv_instances_set_rpa_expired(hdev, false);
7147 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7148 }
7149
7150 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7151 if (err < 0)
7152 goto unlock;
7153
7154 if (changed)
7155 err = new_settings(hdev, sk);
7156
7157 unlock:
7158 hci_dev_unlock(hdev);
7159 return err;
7160 }
7161
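/* An IRK address must be public, or a static random address with the
 * two most significant bits set.
 */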
7162 static bool irk_is_valid(struct mgmt_irk_info *irk)
7163 {
7164 switch (irk->addr.type) {
7165 case BDADDR_LE_PUBLIC:
7166 return true;
7167
7168 case BDADDR_LE_RANDOM:
7169 /* Two most significant bits shall be set */
7170 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7171 return false;
7172 return true;
7173 }
7174
7175 return false;
7176 }
7177
7178 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7179 u16 len)
7180 {
7181 struct mgmt_cp_load_irks *cp = cp_data;
7182 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7183 sizeof(struct mgmt_irk_info));
7184 u16 irk_count, expected_len;
7185 int i, err;
7186
7187 bt_dev_dbg(hdev, "sock %p", sk);
7188
7189 if (!lmp_le_capable(hdev))
7190 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7191 MGMT_STATUS_NOT_SUPPORTED);
7192
7193 irk_count = __le16_to_cpu(cp->irk_count);
7194 if (irk_count > max_irk_count) {
7195 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7196 irk_count);
7197 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7198 MGMT_STATUS_INVALID_PARAMS);
7199 }
7200
7201 expected_len = struct_size(cp, irks, irk_count);
7202 if (expected_len != len) {
7203 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7204 expected_len, len);
7205 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7206 MGMT_STATUS_INVALID_PARAMS);
7207 }
7208
7209 bt_dev_dbg(hdev, "irk_count %u", irk_count);
7210
7211 for (i = 0; i < irk_count; i++) {
7212 struct mgmt_irk_info *key = &cp->irks[i];
7213
7214 if (!irk_is_valid(key))
7215 return mgmt_cmd_status(sk, hdev->id,
7216 MGMT_OP_LOAD_IRKS,
7217 MGMT_STATUS_INVALID_PARAMS);
7218 }
7219
7220 hci_dev_lock(hdev);
7221
7222 hci_smp_irks_clear(hdev);
7223
7224 for (i = 0; i < irk_count; i++) {
7225 struct mgmt_irk_info *irk = &cp->irks[i];
7226
7227 if (hci_is_blocked_key(hdev,
7228 HCI_BLOCKED_KEY_TYPE_IRK,
7229 irk->val)) {
7230 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7231 &irk->addr.bdaddr);
7232 continue;
7233 }
7234
7235 hci_add_irk(hdev, &irk->addr.bdaddr,
7236 le_addr_type(irk->addr.type), irk->val,
7237 BDADDR_ANY);
7238 }
7239
7240 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7241
7242 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7243
7244 hci_dev_unlock(hdev);
7245
7246 return err;
7247 }
7248
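/* An LTK must carry a 0x00 or 0x01 initiator value and, like an IRK,
 * either a public or a valid static random address.
 */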
7249 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7250 {
7251 if (key->initiator != 0x00 && key->initiator != 0x01)
7252 return false;
7253
7254 switch (key->addr.type) {
7255 case BDADDR_LE_PUBLIC:
7256 return true;
7257
7258 case BDADDR_LE_RANDOM:
7259 /* Two most significant bits shall be set */
7260 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7261 return false;
7262 return true;
7263 }
7264
7265 return false;
7266 }
7267
7268 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7269 void *cp_data, u16 len)
7270 {
7271 struct mgmt_cp_load_long_term_keys *cp = cp_data;
7272 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7273 sizeof(struct mgmt_ltk_info));
7274 u16 key_count, expected_len;
7275 int i, err;
7276
7277 bt_dev_dbg(hdev, "sock %p", sk);
7278
7279 if (!lmp_le_capable(hdev))
7280 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7281 MGMT_STATUS_NOT_SUPPORTED);
7282
7283 key_count = __le16_to_cpu(cp->key_count);
7284 if (key_count > max_key_count) {
7285 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7286 key_count);
7287 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7288 MGMT_STATUS_INVALID_PARAMS);
7289 }
7290
7291 expected_len = struct_size(cp, keys, key_count);
7292 if (expected_len != len) {
7293 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7294 expected_len, len);
7295 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7296 MGMT_STATUS_INVALID_PARAMS);
7297 }
7298
7299 bt_dev_dbg(hdev, "key_count %u", key_count);
7300
7301 hci_dev_lock(hdev);
7302
7303 hci_smp_ltks_clear(hdev);
7304
7305 for (i = 0; i < key_count; i++) {
7306 struct mgmt_ltk_info *key = &cp->keys[i];
7307 u8 type, authenticated;
7308
7309 if (hci_is_blocked_key(hdev,
7310 HCI_BLOCKED_KEY_TYPE_LTK,
7311 key->val)) {
7312 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7313 &key->addr.bdaddr);
7314 continue;
7315 }
7316
7317 if (!ltk_is_valid(key)) {
7318 bt_dev_warn(hdev, "Invalid LTK for %pMR",
7319 &key->addr.bdaddr);
7320 continue;
7321 }
7322
7323 switch (key->type) {
7324 case MGMT_LTK_UNAUTHENTICATED:
7325 authenticated = 0x00;
7326 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7327 break;
7328 case MGMT_LTK_AUTHENTICATED:
7329 authenticated = 0x01;
7330 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7331 break;
7332 case MGMT_LTK_P256_UNAUTH:
7333 authenticated = 0x00;
7334 type = SMP_LTK_P256;
7335 break;
7336 case MGMT_LTK_P256_AUTH:
7337 authenticated = 0x01;
7338 type = SMP_LTK_P256;
7339 break;
7340 case MGMT_LTK_P256_DEBUG:
7341 authenticated = 0x00;
7342 type = SMP_LTK_P256_DEBUG;
7343 fallthrough;
7344 default:
7345 continue;
7346 }
7347
7348 hci_add_ltk(hdev, &key->addr.bdaddr,
7349 le_addr_type(key->addr.type), type, authenticated,
7350 key->val, key->enc_size, key->ediv, key->rand);
7351 }
7352
7353 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7354 NULL, 0);
7355
7356 hci_dev_unlock(hdev);
7357
7358 return err;
7359 }
7360
7361 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7362 {
7363 struct mgmt_pending_cmd *cmd = data;
7364 struct hci_conn *conn = cmd->user_data;
7365 struct mgmt_cp_get_conn_info *cp = cmd->param;
7366 struct mgmt_rp_get_conn_info rp;
7367 u8 status;
7368
7369 bt_dev_dbg(hdev, "err %d", err);
7370
7371 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7372
7373 status = mgmt_status(err);
7374 if (status == MGMT_STATUS_SUCCESS) {
7375 rp.rssi = conn->rssi;
7376 rp.tx_power = conn->tx_power;
7377 rp.max_tx_power = conn->max_tx_power;
7378 } else {
7379 rp.rssi = HCI_RSSI_INVALID;
7380 rp.tx_power = HCI_TX_POWER_INVALID;
7381 rp.max_tx_power = HCI_TX_POWER_INVALID;
7382 }
7383
7384 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_GET_CONN_INFO, status,
7385 &rp, sizeof(rp));
7386
7387 mgmt_pending_free(cmd);
7388 }
7389
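/* Sync callback for MGMT_OP_GET_CONN_INFO: re-reads the RSSI on every
 * call, but queries the TX power values only when they are not already
 * cached.
 */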
7390 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7391 {
7392 struct mgmt_pending_cmd *cmd = data;
7393 struct mgmt_cp_get_conn_info *cp = cmd->param;
7394 struct hci_conn *conn;
7395 int err;
7396 __le16 handle;
7397
7398 /* Make sure we are still connected */
7399 if (cp->addr.type == BDADDR_BREDR)
7400 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7401 &cp->addr.bdaddr);
7402 else
7403 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7404
7405 if (!conn || conn->state != BT_CONNECTED)
7406 return MGMT_STATUS_NOT_CONNECTED;
7407
7408 cmd->user_data = conn;
7409 handle = cpu_to_le16(conn->handle);
7410
7411 /* Refresh RSSI each time */
7412 err = hci_read_rssi_sync(hdev, handle);
7413
7414 /* For LE links the TX power does not change, so there is no need
7415 * to query for it again once the value is known.
7416 */
7417 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7418 conn->tx_power == HCI_TX_POWER_INVALID))
7419 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7420
7421 /* Max TX power needs to be read only once per connection */
7422 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7423 err = hci_read_tx_power_sync(hdev, handle, 0x01);
7424
7425 return err;
7426 }
7427
7428 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7429 u16 len)
7430 {
7431 struct mgmt_cp_get_conn_info *cp = data;
7432 struct mgmt_rp_get_conn_info rp;
7433 struct hci_conn *conn;
7434 unsigned long conn_info_age;
7435 int err = 0;
7436
7437 bt_dev_dbg(hdev, "sock %p", sk);
7438
7439 memset(&rp, 0, sizeof(rp));
7440 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7441 rp.addr.type = cp->addr.type;
7442
7443 if (!bdaddr_type_is_valid(cp->addr.type))
7444 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7445 MGMT_STATUS_INVALID_PARAMS,
7446 &rp, sizeof(rp));
7447
7448 hci_dev_lock(hdev);
7449
7450 if (!hdev_is_powered(hdev)) {
7451 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7452 MGMT_STATUS_NOT_POWERED, &rp,
7453 sizeof(rp));
7454 goto unlock;
7455 }
7456
7457 if (cp->addr.type == BDADDR_BREDR)
7458 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7459 &cp->addr.bdaddr);
7460 else
7461 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7462
7463 if (!conn || conn->state != BT_CONNECTED) {
7464 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7465 MGMT_STATUS_NOT_CONNECTED, &rp,
7466 sizeof(rp));
7467 goto unlock;
7468 }
7469
7470 /* To keep the client from guessing when to poll again, calculate the
7471 * conn info age as a random value between the min/max set in hdev.
7472 */
7473 conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
7474 hdev->conn_info_max_age - 1);
7475
7476 /* Query controller to refresh cached values if they are too old or were
7477 * never read.
7478 */
7479 if (time_after(jiffies, conn->conn_info_timestamp +
7480 msecs_to_jiffies(conn_info_age)) ||
7481 !conn->conn_info_timestamp) {
7482 struct mgmt_pending_cmd *cmd;
7483
7484 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7485 len);
7486 if (!cmd) {
7487 err = -ENOMEM;
7488 } else {
7489 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7490 cmd, get_conn_info_complete);
7491 }
7492
7493 if (err < 0) {
7494 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7495 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7496
7497 if (cmd)
7498 mgmt_pending_free(cmd);
7499
7500 goto unlock;
7501 }
7502
7503 conn->conn_info_timestamp = jiffies;
7504 } else {
7505 /* Cache is valid, just reply with values cached in hci_conn */
7506 rp.rssi = conn->rssi;
7507 rp.tx_power = conn->tx_power;
7508 rp.max_tx_power = conn->max_tx_power;
7509
7510 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7511 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7512 }
7513
7514 unlock:
7515 hci_dev_unlock(hdev);
7516 return err;
7517 }
7518
7519 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7520 {
7521 struct mgmt_pending_cmd *cmd = data;
7522 struct mgmt_cp_get_clock_info *cp = cmd->param;
7523 struct mgmt_rp_get_clock_info rp;
7524 struct hci_conn *conn = cmd->user_data;
7525 u8 status = mgmt_status(err);
7526
7527 bt_dev_dbg(hdev, "err %d", err);
7528
7529 memset(&rp, 0, sizeof(rp));
7530 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7531 rp.addr.type = cp->addr.type;
7532
7533 if (err)
7534 goto complete;
7535
7536 rp.local_clock = cpu_to_le32(hdev->clock);
7537
7538 if (conn) {
7539 rp.piconet_clock = cpu_to_le32(conn->clock);
7540 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7541 }
7542
7543 complete:
7544 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status, &rp,
7545 sizeof(rp));
7546
7547 mgmt_pending_free(cmd);
7548 }
7549
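/* Sync callback for MGMT_OP_GET_CLOCK_INFO: reads the local clock
 * first (zeroed parameters), then the piconet clock of the addressed
 * connection (which = 0x01).
 */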
7550 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7551 {
7552 struct mgmt_pending_cmd *cmd = data;
7553 struct mgmt_cp_get_clock_info *cp = cmd->param;
7554 struct hci_cp_read_clock hci_cp;
7555 struct hci_conn *conn;
7556
7557 memset(&hci_cp, 0, sizeof(hci_cp));
7558 hci_read_clock_sync(hdev, &hci_cp);
7559
7560 /* Make sure connection still exists */
7561 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7562 if (!conn || conn->state != BT_CONNECTED)
7563 return MGMT_STATUS_NOT_CONNECTED;
7564
7565 cmd->user_data = conn;
7566 hci_cp.handle = cpu_to_le16(conn->handle);
7567 hci_cp.which = 0x01; /* Piconet clock */
7568
7569 return hci_read_clock_sync(hdev, &hci_cp);
7570 }
7571
7572 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7573 u16 len)
7574 {
7575 struct mgmt_cp_get_clock_info *cp = data;
7576 struct mgmt_rp_get_clock_info rp;
7577 struct mgmt_pending_cmd *cmd;
7578 struct hci_conn *conn;
7579 int err;
7580
7581 bt_dev_dbg(hdev, "sock %p", sk);
7582
7583 memset(&rp, 0, sizeof(rp));
7584 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7585 rp.addr.type = cp->addr.type;
7586
7587 if (cp->addr.type != BDADDR_BREDR)
7588 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7589 MGMT_STATUS_INVALID_PARAMS,
7590 &rp, sizeof(rp));
7591
7592 hci_dev_lock(hdev);
7593
7594 if (!hdev_is_powered(hdev)) {
7595 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7596 MGMT_STATUS_NOT_POWERED, &rp,
7597 sizeof(rp));
7598 goto unlock;
7599 }
7600
7601 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7602 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7603 &cp->addr.bdaddr);
7604 if (!conn || conn->state != BT_CONNECTED) {
7605 err = mgmt_cmd_complete(sk, hdev->id,
7606 MGMT_OP_GET_CLOCK_INFO,
7607 MGMT_STATUS_NOT_CONNECTED,
7608 &rp, sizeof(rp));
7609 goto unlock;
7610 }
7611 } else {
7612 conn = NULL;
7613 }
7614
7615 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7616 if (!cmd)
7617 err = -ENOMEM;
7618 else
7619 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7620 get_clock_info_complete);
7621
7622 if (err < 0) {
7623 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7624 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7625
7626 if (cmd)
7627 mgmt_pending_free(cmd);
7628 }
7629
7630
7631 unlock:
7632 hci_dev_unlock(hdev);
7633 return err;
7634 }
7635
7636 static void device_added(struct sock *sk, struct hci_dev *hdev,
7637 bdaddr_t *bdaddr, u8 type, u8 action)
7638 {
7639 struct mgmt_ev_device_added ev;
7640
7641 bacpy(&ev.addr.bdaddr, bdaddr);
7642 ev.addr.type = type;
7643 ev.action = action;
7644
7645 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7646 }
7647
7648 static void add_device_complete(struct hci_dev *hdev, void *data, int err)
7649 {
7650 struct mgmt_pending_cmd *cmd = data;
7651 struct mgmt_cp_add_device *cp = cmd->param;
7652
7653 if (!err) {
7654 struct hci_conn_params *params;
7655
7656 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7657 le_addr_type(cp->addr.type));
7658
7659 device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
7660 cp->action);
7661 device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
7662 cp->addr.type, hdev->conn_flags,
7663 params ? params->flags : 0);
7664 }
7665
7666 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
7667 mgmt_status(err), &cp->addr, sizeof(cp->addr));
7668 mgmt_pending_free(cmd);
7669 }
7670
7671 static int add_device_sync(struct hci_dev *hdev, void *data)
7672 {
7673 return hci_update_passive_scan_sync(hdev);
7674 }
7675
7676 static int add_device(struct sock *sk, struct hci_dev *hdev,
7677 void *data, u16 len)
7678 {
7679 struct mgmt_pending_cmd *cmd;
7680 struct mgmt_cp_add_device *cp = data;
7681 u8 auto_conn, addr_type;
7682 struct hci_conn_params *params;
7683 int err;
7684 u32 current_flags = 0;
7685 u32 supported_flags;
7686
7687 bt_dev_dbg(hdev, "sock %p", sk);
7688
7689 if (!bdaddr_type_is_valid(cp->addr.type) ||
7690 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7691 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7692 MGMT_STATUS_INVALID_PARAMS,
7693 &cp->addr, sizeof(cp->addr));
7694
7695 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7696 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7697 MGMT_STATUS_INVALID_PARAMS,
7698 &cp->addr, sizeof(cp->addr));
7699
7700 hci_dev_lock(hdev);
7701
7702 if (cp->addr.type == BDADDR_BREDR) {
7703 /* Only the incoming connection action is supported for now */
7704 if (cp->action != 0x01) {
7705 err = mgmt_cmd_complete(sk, hdev->id,
7706 MGMT_OP_ADD_DEVICE,
7707 MGMT_STATUS_INVALID_PARAMS,
7708 &cp->addr, sizeof(cp->addr));
7709 goto unlock;
7710 }
7711
7712 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7713 &cp->addr.bdaddr,
7714 cp->addr.type, 0);
7715 if (err)
7716 goto unlock;
7717
7718 hci_update_scan(hdev);
7719
7720 goto added;
7721 }
7722
7723 addr_type = le_addr_type(cp->addr.type);
7724
7725 if (cp->action == 0x02)
7726 auto_conn = HCI_AUTO_CONN_ALWAYS;
7727 else if (cp->action == 0x01)
7728 auto_conn = HCI_AUTO_CONN_DIRECT;
7729 else
7730 auto_conn = HCI_AUTO_CONN_REPORT;
7731
7732 /* Kernel internally uses conn_params with resolvable private
7733 * address, but Add Device allows only identity addresses.
7734 * Make sure it is enforced before calling
7735 * hci_conn_params_lookup.
7736 */
7737 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7738 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7739 MGMT_STATUS_INVALID_PARAMS,
7740 &cp->addr, sizeof(cp->addr));
7741 goto unlock;
7742 }
7743
7744 /* If the connection parameters don't exist for this device,
7745 * they will be created and configured with defaults.
7746 */
7747 params = hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7748 auto_conn);
7749 if (!params) {
7750 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7751 MGMT_STATUS_FAILED, &cp->addr,
7752 sizeof(cp->addr));
7753 goto unlock;
7754 }
7755
7756 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
7757 if (!cmd) {
7758 err = -ENOMEM;
7759 goto unlock;
7760 }
7761
7762 err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
7763 add_device_complete);
7764 if (err < 0) {
7765 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7766 MGMT_STATUS_FAILED, &cp->addr,
7767 sizeof(cp->addr));
7768 mgmt_pending_free(cmd);
7769 }
7770
7771 goto unlock;
7772
7773 added:
7774 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7775 supported_flags = hdev->conn_flags;
7776 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7777 supported_flags, current_flags);
7778
7779 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7780 MGMT_STATUS_SUCCESS, &cp->addr,
7781 sizeof(cp->addr));
7782
7783 unlock:
7784 hci_dev_unlock(hdev);
7785 return err;
7786 }
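
/* Summary sketch of the Add Device action mapping implemented above:
 *
 *	BDADDR_BREDR + action 0x01 -> hdev->accept_list (incoming
 *					connections only; other actions
 *					are rejected)
 *	LE + action 0x00 -> HCI_AUTO_CONN_REPORT (passively scan and
 *					report, never auto-connect)
 *	LE + action 0x01 -> HCI_AUTO_CONN_DIRECT (connect on directed
 *					advertising)
 *	LE + action 0x02 -> HCI_AUTO_CONN_ALWAYS (connect whenever the
 *					device is seen advertising)
 *
 * LE entries must use identity addresses; the passive scan is then
 * refreshed from the cmd_sync queue via add_device_sync().
 */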
7787
7788 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7789 bdaddr_t *bdaddr, u8 type)
7790 {
7791 struct mgmt_ev_device_removed ev;
7792
7793 bacpy(&ev.addr.bdaddr, bdaddr);
7794 ev.addr.type = type;
7795
7796 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7797 }
7798
7799 static int remove_device_sync(struct hci_dev *hdev, void *data)
7800 {
7801 return hci_update_passive_scan_sync(hdev);
7802 }
7803
7804 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7805 void *data, u16 len)
7806 {
7807 struct mgmt_cp_remove_device *cp = data;
7808 int err;
7809
7810 bt_dev_dbg(hdev, "sock %p", sk);
7811
7812 hci_dev_lock(hdev);
7813
7814 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7815 struct hci_conn_params *params;
7816 u8 addr_type;
7817
7818 if (!bdaddr_type_is_valid(cp->addr.type)) {
7819 err = mgmt_cmd_complete(sk, hdev->id,
7820 MGMT_OP_REMOVE_DEVICE,
7821 MGMT_STATUS_INVALID_PARAMS,
7822 &cp->addr, sizeof(cp->addr));
7823 goto unlock;
7824 }
7825
7826 if (cp->addr.type == BDADDR_BREDR) {
7827 err = hci_bdaddr_list_del(&hdev->accept_list,
7828 &cp->addr.bdaddr,
7829 cp->addr.type);
7830 if (err) {
7831 err = mgmt_cmd_complete(sk, hdev->id,
7832 MGMT_OP_REMOVE_DEVICE,
7833 MGMT_STATUS_INVALID_PARAMS,
7834 &cp->addr,
7835 sizeof(cp->addr));
7836 goto unlock;
7837 }
7838
7839 hci_update_scan(hdev);
7840
7841 device_removed(sk, hdev, &cp->addr.bdaddr,
7842 cp->addr.type);
7843 goto complete;
7844 }
7845
7846 addr_type = le_addr_type(cp->addr.type);
7847
7848 /* Kernel internally uses conn_params with resolvable private
7849 * address, but Remove Device allows only identity addresses.
7850 * Make sure it is enforced before calling
7851 * hci_conn_params_lookup.
7852 */
7853 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7854 err = mgmt_cmd_complete(sk, hdev->id,
7855 MGMT_OP_REMOVE_DEVICE,
7856 MGMT_STATUS_INVALID_PARAMS,
7857 &cp->addr, sizeof(cp->addr));
7858 goto unlock;
7859 }
7860
7861 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7862 addr_type);
7863 if (!params) {
7864 err = mgmt_cmd_complete(sk, hdev->id,
7865 MGMT_OP_REMOVE_DEVICE,
7866 MGMT_STATUS_INVALID_PARAMS,
7867 &cp->addr, sizeof(cp->addr));
7868 goto unlock;
7869 }
7870
7871 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7872 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7873 err = mgmt_cmd_complete(sk, hdev->id,
7874 MGMT_OP_REMOVE_DEVICE,
7875 MGMT_STATUS_INVALID_PARAMS,
7876 &cp->addr, sizeof(cp->addr));
7877 goto unlock;
7878 }
7879
7880 hci_conn_params_free(params);
7881
7882 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7883 } else {
7884 struct hci_conn_params *p, *tmp;
7885 struct bdaddr_list *b, *btmp;
7886
7887 if (cp->addr.type) {
7888 err = mgmt_cmd_complete(sk, hdev->id,
7889 MGMT_OP_REMOVE_DEVICE,
7890 MGMT_STATUS_INVALID_PARAMS,
7891 &cp->addr, sizeof(cp->addr));
7892 goto unlock;
7893 }
7894
7895 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7896 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7897 list_del(&b->list);
7898 kfree(b);
7899 }
7900
7901 hci_update_scan(hdev);
7902
7903 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7904 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7905 continue;
7906 device_removed(sk, hdev, &p->addr, p->addr_type);
7907 if (p->explicit_connect) {
7908 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7909 continue;
7910 }
7911 hci_conn_params_free(p);
7912 }
7913
7914 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7915 }
7916
7917 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7918
7919 complete:
7920 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7921 MGMT_STATUS_SUCCESS, &cp->addr,
7922 sizeof(cp->addr));
7923 unlock:
7924 hci_dev_unlock(hdev);
7925 return err;
7926 }
7927
7928 static int conn_update_sync(struct hci_dev *hdev, void *data)
7929 {
7930 struct hci_conn_params *params = data;
7931 struct hci_conn *conn;
7932
7933 conn = hci_conn_hash_lookup_le(hdev, &params->addr, params->addr_type);
7934 if (!conn)
7935 return -ECANCELED;
7936
7937 return hci_le_conn_update_sync(hdev, conn, params);
7938 }
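
/* Note: conn_update_sync() runs later on the cmd_sync queue, so it
 * re-resolves the connection by address rather than caching a struct
 * hci_conn pointer; if the connection is gone by the time the work
 * runs, it bails out with -ECANCELED instead of touching a stale
 * handle.
 */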
7939
7940 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7941 u16 len)
7942 {
7943 struct mgmt_cp_load_conn_param *cp = data;
7944 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7945 sizeof(struct mgmt_conn_param));
7946 u16 param_count, expected_len;
7947 int i;
7948
7949 if (!lmp_le_capable(hdev))
7950 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7951 MGMT_STATUS_NOT_SUPPORTED);
7952
7953 param_count = __le16_to_cpu(cp->param_count);
7954 if (param_count > max_param_count) {
7955 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7956 param_count);
7957 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7958 MGMT_STATUS_INVALID_PARAMS);
7959 }
7960
7961 expected_len = struct_size(cp, params, param_count);
7962 if (expected_len != len) {
7963 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7964 expected_len, len);
7965 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7966 MGMT_STATUS_INVALID_PARAMS);
7967 }
7968
7969 bt_dev_dbg(hdev, "param_count %u", param_count);
7970
7971 hci_dev_lock(hdev);
7972
7973 if (param_count > 1)
7974 hci_conn_params_clear_disabled(hdev);
7975
7976 for (i = 0; i < param_count; i++) {
7977 struct mgmt_conn_param *param = &cp->params[i];
7978 struct hci_conn_params *hci_param;
7979 u16 min, max, latency, timeout;
7980 bool update = false;
7981 u8 addr_type;
7982
7983 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
7984 param->addr.type);
7985
7986 if (param->addr.type == BDADDR_LE_PUBLIC) {
7987 addr_type = ADDR_LE_DEV_PUBLIC;
7988 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7989 addr_type = ADDR_LE_DEV_RANDOM;
7990 } else {
7991 bt_dev_err(hdev, "ignoring invalid connection parameters");
7992 continue;
7993 }
7994
7995 min = le16_to_cpu(param->min_interval);
7996 max = le16_to_cpu(param->max_interval);
7997 latency = le16_to_cpu(param->latency);
7998 timeout = le16_to_cpu(param->timeout);
7999
8000 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
8001 min, max, latency, timeout);
8002
8003 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
8004 bt_dev_err(hdev, "ignoring invalid connection parameters");
8005 continue;
8006 }
8007
8008 /* Detect when we are loading an existing parameter, then
8009 * attempt to trigger the connection update procedure.
8010 */
8011 if (!i && param_count == 1) {
8012 hci_param = hci_conn_params_lookup(hdev,
8013 &param->addr.bdaddr,
8014 addr_type);
8015 if (hci_param)
8016 update = true;
8017 else
8018 hci_conn_params_clear_disabled(hdev);
8019 }
8020
8021 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
8022 addr_type);
8023 if (!hci_param) {
8024 bt_dev_err(hdev, "failed to add connection parameters");
8025 continue;
8026 }
8027
8028 hci_param->conn_min_interval = min;
8029 hci_param->conn_max_interval = max;
8030 hci_param->conn_latency = latency;
8031 hci_param->supervision_timeout = timeout;
8032
8033 /* Check if we need to trigger a connection update */
8034 if (update) {
8035 struct hci_conn *conn;
8036
8037 /* Look up an existing connection as central and check
8038 * whether the parameters match; if they don't, trigger
8039 * a connection update.
8040 */
8041 conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
8042 addr_type);
8043 if (conn && conn->role == HCI_ROLE_MASTER &&
8044 (conn->le_conn_min_interval != min ||
8045 conn->le_conn_max_interval != max ||
8046 conn->le_conn_latency != latency ||
8047 conn->le_supv_timeout != timeout))
8048 hci_cmd_sync_queue(hdev, conn_update_sync,
8049 hci_param, NULL);
8050 }
8051 }
8052
8053 hci_dev_unlock(hdev);
8054
8055 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
8056 NULL, 0);
8057 }
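
/* Worked example of the length check above: struct_size(cp, params,
 * param_count) expands to sizeof(*cp) + param_count *
 * sizeof(struct mgmt_conn_param) with overflow protection. Taking an
 * illustrative 2-byte header and 15-byte packed entry (sizes assumed
 * here, not derived from this file), a load of 3 parameters must
 * arrive as exactly 2 + 3 * 15 = 47 bytes; any other length fails
 * with MGMT_STATUS_INVALID_PARAMS before the loop runs.
 */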
8058
8059 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
8060 void *data, u16 len)
8061 {
8062 struct mgmt_cp_set_external_config *cp = data;
8063 bool changed;
8064 int err;
8065
8066 bt_dev_dbg(hdev, "sock %p", sk);
8067
8068 if (hdev_is_powered(hdev))
8069 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8070 MGMT_STATUS_REJECTED);
8071
8072 if (cp->config != 0x00 && cp->config != 0x01)
8073 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8074 MGMT_STATUS_INVALID_PARAMS);
8075
8076 if (!hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG))
8077 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8078 MGMT_STATUS_NOT_SUPPORTED);
8079
8080 hci_dev_lock(hdev);
8081
8082 if (cp->config)
8083 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
8084 else
8085 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
8086
8087 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
8088 if (err < 0)
8089 goto unlock;
8090
8091 if (!changed)
8092 goto unlock;
8093
8094 err = new_options(hdev, sk);
8095
8096 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
8097 mgmt_index_removed(hdev);
8098
8099 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
8100 hci_dev_set_flag(hdev, HCI_CONFIG);
8101 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8102
8103 queue_work(hdev->req_workqueue, &hdev->power_on);
8104 } else {
8105 set_bit(HCI_RAW, &hdev->flags);
8106 mgmt_index_added(hdev);
8107 }
8108 }
8109
8110 unlock:
8111 hci_dev_unlock(hdev);
8112 return err;
8113 }
8114
8115 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
8116 void *data, u16 len)
8117 {
8118 struct mgmt_cp_set_public_address *cp = data;
8119 bool changed;
8120 int err;
8121
8122 bt_dev_dbg(hdev, "sock %p", sk);
8123
8124 if (hdev_is_powered(hdev))
8125 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8126 MGMT_STATUS_REJECTED);
8127
8128 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
8129 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8130 MGMT_STATUS_INVALID_PARAMS);
8131
8132 if (!hdev->set_bdaddr)
8133 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8134 MGMT_STATUS_NOT_SUPPORTED);
8135
8136 hci_dev_lock(hdev);
8137
8138 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
8139 bacpy(&hdev->public_addr, &cp->bdaddr);
8140
8141 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8142 if (err < 0)
8143 goto unlock;
8144
8145 if (!changed)
8146 goto unlock;
8147
8148 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8149 err = new_options(hdev, sk);
8150
8151 if (is_configured(hdev)) {
8152 mgmt_index_removed(hdev);
8153
8154 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8155
8156 hci_dev_set_flag(hdev, HCI_CONFIG);
8157 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8158
8159 queue_work(hdev->req_workqueue, &hdev->power_on);
8160 }
8161
8162 unlock:
8163 hci_dev_unlock(hdev);
8164 return err;
8165 }
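
/* Sketch of the resulting state transition: once the new public address
 * makes is_configured() true, the controller stops being announced as
 * an unconfigured index and is brought up as a regular one:
 * mgmt_index_removed(), clear HCI_UNCONFIGURED, set HCI_CONFIG and
 * HCI_AUTO_OFF, then queue the power_on work, which re-announces the
 * index once setup completes.
 */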
8166
8167 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8168 int err)
8169 {
8170 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8171 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8172 u8 *h192, *r192, *h256, *r256;
8173 struct mgmt_pending_cmd *cmd = data;
8174 struct sk_buff *skb = cmd->skb;
8175 u8 status = mgmt_status(err);
8176 u16 eir_len;
8177
8178 if (!status) {
8179 if (!skb)
8180 status = MGMT_STATUS_FAILED;
8181 else if (IS_ERR(skb))
8182 status = mgmt_status(PTR_ERR(skb));
8183 else
8184 status = mgmt_status(skb->data[0]);
8185 }
8186
8187 bt_dev_dbg(hdev, "status %u", status);
8188
8189 mgmt_cp = cmd->param;
8190
8191 if (status) {
8192 status = mgmt_status(status);
8193 eir_len = 0;
8194
8195 h192 = NULL;
8196 r192 = NULL;
8197 h256 = NULL;
8198 r256 = NULL;
8199 } else if (!bredr_sc_enabled(hdev)) {
8200 struct hci_rp_read_local_oob_data *rp;
8201
8202 if (skb->len != sizeof(*rp)) {
8203 status = MGMT_STATUS_FAILED;
8204 eir_len = 0;
8205 } else {
8206 status = MGMT_STATUS_SUCCESS;
8207 rp = (void *)skb->data;
8208
8209 eir_len = 5 + 18 + 18;
8210 h192 = rp->hash;
8211 r192 = rp->rand;
8212 h256 = NULL;
8213 r256 = NULL;
8214 }
8215 } else {
8216 struct hci_rp_read_local_oob_ext_data *rp;
8217
8218 if (skb->len != sizeof(*rp)) {
8219 status = MGMT_STATUS_FAILED;
8220 eir_len = 0;
8221 } else {
8222 status = MGMT_STATUS_SUCCESS;
8223 rp = (void *)skb->data;
8224
8225 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8226 eir_len = 5 + 18 + 18;
8227 h192 = NULL;
8228 r192 = NULL;
8229 } else {
8230 eir_len = 5 + 18 + 18 + 18 + 18;
8231 h192 = rp->hash192;
8232 r192 = rp->rand192;
8233 }
8234
8235 h256 = rp->hash256;
8236 r256 = rp->rand256;
8237 }
8238 }
8239
8240 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8241 if (!mgmt_rp)
8242 goto done;
8243
8244 if (eir_len == 0)
8245 goto send_rsp;
8246
8247 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8248 hdev->dev_class, 3);
8249
8250 if (h192 && r192) {
8251 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8252 EIR_SSP_HASH_C192, h192, 16);
8253 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8254 EIR_SSP_RAND_R192, r192, 16);
8255 }
8256
8257 if (h256 && r256) {
8258 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8259 EIR_SSP_HASH_C256, h256, 16);
8260 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8261 EIR_SSP_RAND_R256, r256, 16);
8262 }
8263
8264 send_rsp:
8265 mgmt_rp->type = mgmt_cp->type;
8266 mgmt_rp->eir_len = cpu_to_le16(eir_len);
8267
8268 err = mgmt_cmd_complete(cmd->sk, hdev->id,
8269 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8270 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8271 if (err < 0 || status)
8272 goto done;
8273
8274 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8275
8276 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8277 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8278 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8279 done:
8280 if (skb && !IS_ERR(skb))
8281 kfree_skb(skb);
8282
8283 kfree(mgmt_rp);
8284 mgmt_pending_free(cmd);
8285 }
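
/* The eir_len arithmetic above: every EIR field costs 1 length byte +
 * 1 type byte + payload. "5" is the Class of Device field (2 + 3-byte
 * class) and each "18" is one 16-byte SSP hash or randomizer (2 + 16).
 * Hence 5 + 18 + 18 when only one pair is present (P-192 legacy, or
 * P-256 only in SC Only mode) and 5 + 18 + 18 + 18 + 18 when both the
 * P-192 and P-256 values are returned.
 */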
8286
8287 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8288 struct mgmt_cp_read_local_oob_ext_data *cp)
8289 {
8290 struct mgmt_pending_cmd *cmd;
8291 int err;
8292
8293 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8294 cp, sizeof(*cp));
8295 if (!cmd)
8296 return -ENOMEM;
8297
8298 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8299 read_local_oob_ext_data_complete);
8300
8301 if (err < 0) {
8302 mgmt_pending_remove(cmd);
8303 return err;
8304 }
8305
8306 return 0;
8307 }
8308
8309 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8310 void *data, u16 data_len)
8311 {
8312 struct mgmt_cp_read_local_oob_ext_data *cp = data;
8313 struct mgmt_rp_read_local_oob_ext_data *rp;
8314 size_t rp_len;
8315 u16 eir_len;
8316 u8 status, flags, role, addr[7], hash[16], rand[16];
8317 int err;
8318
8319 bt_dev_dbg(hdev, "sock %p", sk);
8320
8321 if (hdev_is_powered(hdev)) {
8322 switch (cp->type) {
8323 case BIT(BDADDR_BREDR):
8324 status = mgmt_bredr_support(hdev);
8325 if (status)
8326 eir_len = 0;
8327 else
8328 eir_len = 5;
8329 break;
8330 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8331 status = mgmt_le_support(hdev);
8332 if (status)
8333 eir_len = 0;
8334 else
8335 eir_len = 9 + 3 + 18 + 18 + 3;
8336 break;
8337 default:
8338 status = MGMT_STATUS_INVALID_PARAMS;
8339 eir_len = 0;
8340 break;
8341 }
8342 } else {
8343 status = MGMT_STATUS_NOT_POWERED;
8344 eir_len = 0;
8345 }
8346
8347 rp_len = sizeof(*rp) + eir_len;
8348 rp = kmalloc(rp_len, GFP_ATOMIC);
8349 if (!rp)
8350 return -ENOMEM;
8351
8352 if (!status && !lmp_ssp_capable(hdev)) {
8353 status = MGMT_STATUS_NOT_SUPPORTED;
8354 eir_len = 0;
8355 }
8356
8357 if (status)
8358 goto complete;
8359
8360 hci_dev_lock(hdev);
8361
8362 eir_len = 0;
8363 switch (cp->type) {
8364 case BIT(BDADDR_BREDR):
8365 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8366 err = read_local_ssp_oob_req(hdev, sk, cp);
8367 hci_dev_unlock(hdev);
8368 if (!err)
8369 goto done;
8370
8371 status = MGMT_STATUS_FAILED;
8372 goto complete;
8373 } else {
8374 eir_len = eir_append_data(rp->eir, eir_len,
8375 EIR_CLASS_OF_DEV,
8376 hdev->dev_class, 3);
8377 }
8378 break;
8379 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8380 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8381 smp_generate_oob(hdev, hash, rand) < 0) {
8382 hci_dev_unlock(hdev);
8383 status = MGMT_STATUS_FAILED;
8384 goto complete;
8385 }
8386
8387 /* This should return the active RPA, but since the RPA
8388 * is only programmed on demand, it is really hard to fill
8389 * this in at the moment. For now disallow retrieving
8390 * local out-of-band data when privacy is in use.
8391 *
8392 * Returning the identity address will not help here since
8393 * pairing happens before the identity resolving key is
8394 * known and thus the connection establishment happens
8395 * based on the RPA and not the identity address.
8396 */
8397 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8398 hci_dev_unlock(hdev);
8399 status = MGMT_STATUS_REJECTED;
8400 goto complete;
8401 }
8402
8403 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8404 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8405 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8406 bacmp(&hdev->static_addr, BDADDR_ANY))) {
8407 memcpy(addr, &hdev->static_addr, 6);
8408 addr[6] = 0x01;
8409 } else {
8410 memcpy(addr, &hdev->bdaddr, 6);
8411 addr[6] = 0x00;
8412 }
8413
8414 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8415 addr, sizeof(addr));
8416
8417 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8418 role = 0x02;
8419 else
8420 role = 0x01;
8421
8422 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8423 &role, sizeof(role));
8424
8425 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8426 eir_len = eir_append_data(rp->eir, eir_len,
8427 EIR_LE_SC_CONFIRM,
8428 hash, sizeof(hash));
8429
8430 eir_len = eir_append_data(rp->eir, eir_len,
8431 EIR_LE_SC_RANDOM,
8432 rand, sizeof(rand));
8433 }
8434
8435 flags = mgmt_get_adv_discov_flags(hdev);
8436
8437 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8438 flags |= LE_AD_NO_BREDR;
8439
8440 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8441 &flags, sizeof(flags));
8442 break;
8443 }
8444
8445 hci_dev_unlock(hdev);
8446
8447 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8448
8449 status = MGMT_STATUS_SUCCESS;
8450
8451 complete:
8452 rp->type = cp->type;
8453 rp->eir_len = cpu_to_le16(eir_len);
8454
8455 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8456 status, rp, sizeof(*rp) + eir_len);
8457 if (err < 0 || status)
8458 goto done;
8459
8460 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8461 rp, sizeof(*rp) + eir_len,
8462 HCI_MGMT_OOB_DATA_EVENTS, sk);
8463
8464 done:
8465 kfree(rp);
8466
8467 return err;
8468 }
8469
8470 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8471 {
8472 u32 flags = 0;
8473
8474 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8475 flags |= MGMT_ADV_FLAG_DISCOV;
8476 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8477 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8478 flags |= MGMT_ADV_FLAG_APPEARANCE;
8479 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8480 flags |= MGMT_ADV_PARAM_DURATION;
8481 flags |= MGMT_ADV_PARAM_TIMEOUT;
8482 flags |= MGMT_ADV_PARAM_INTERVALS;
8483 flags |= MGMT_ADV_PARAM_TX_POWER;
8484 flags |= MGMT_ADV_PARAM_SCAN_RSP;
8485
8486 /* With extended advertising, the TX_POWER returned from Set Adv Param
8487 * will always be valid.
8488 */
8489 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8490 flags |= MGMT_ADV_FLAG_TX_POWER;
8491
8492 if (ext_adv_capable(hdev)) {
8493 flags |= MGMT_ADV_FLAG_SEC_1M;
8494 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8495 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8496
8497 if (le_2m_capable(hdev))
8498 flags |= MGMT_ADV_FLAG_SEC_2M;
8499
8500 if (le_coded_capable(hdev))
8501 flags |= MGMT_ADV_FLAG_SEC_CODED;
8502 }
8503
8504 return flags;
8505 }
8506
8507 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8508 void *data, u16 data_len)
8509 {
8510 struct mgmt_rp_read_adv_features *rp;
8511 size_t rp_len;
8512 int err;
8513 struct adv_info *adv_instance;
8514 u32 supported_flags;
8515 u8 *instance;
8516
8517 bt_dev_dbg(hdev, "sock %p", sk);
8518
8519 if (!lmp_le_capable(hdev))
8520 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8521 MGMT_STATUS_REJECTED);
8522
8523 hci_dev_lock(hdev);
8524
8525 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8526 rp = kmalloc(rp_len, GFP_ATOMIC);
8527 if (!rp) {
8528 hci_dev_unlock(hdev);
8529 return -ENOMEM;
8530 }
8531
8532 supported_flags = get_supported_adv_flags(hdev);
8533
8534 rp->supported_flags = cpu_to_le32(supported_flags);
8535 rp->max_adv_data_len = max_adv_len(hdev);
8536 rp->max_scan_rsp_len = max_adv_len(hdev);
8537 rp->max_instances = hdev->le_num_of_adv_sets;
8538 rp->num_instances = hdev->adv_instance_cnt;
8539
8540 instance = rp->instance;
8541 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8542 /* Only instances 1-le_num_of_adv_sets are externally visible */
8543 if (adv_instance->instance <= hdev->adv_instance_cnt) {
8544 *instance = adv_instance->instance;
8545 instance++;
8546 } else {
8547 rp->num_instances--;
8548 rp_len--;
8549 }
8550 }
8551
8552 hci_dev_unlock(hdev);
8553
8554 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8555 MGMT_STATUS_SUCCESS, rp, rp_len);
8556
8557 kfree(rp);
8558
8559 return err;
8560 }
8561
8562 static u8 calculate_name_len(struct hci_dev *hdev)
8563 {
8564 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8565
8566 return eir_append_local_name(hdev, buf, 0);
8567 }
8568
8569 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8570 bool is_adv_data)
8571 {
8572 u8 max_len = max_adv_len(hdev);
8573
8574 if (is_adv_data) {
8575 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8576 MGMT_ADV_FLAG_LIMITED_DISCOV |
8577 MGMT_ADV_FLAG_MANAGED_FLAGS))
8578 max_len -= 3;
8579
8580 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8581 max_len -= 3;
8582 } else {
8583 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8584 max_len -= calculate_name_len(hdev);
8585
8586 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8587 max_len -= 4;
8588 }
8589
8590 return max_len;
8591 }
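
/* Worked example: on a legacy-advertising controller max_adv_len() is
 * 31 bytes. For adv data with MGMT_ADV_FLAG_DISCOV (kernel-managed
 * Flags field, 3 bytes) and MGMT_ADV_FLAG_TX_POWER (3 bytes) the
 * caller is left 31 - 3 - 3 = 25 bytes of TLV space; for scan response
 * data, MGMT_ADV_FLAG_APPEARANCE reserves 4 bytes and
 * MGMT_ADV_FLAG_LOCAL_NAME reserves calculate_name_len(hdev) bytes.
 */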
8592
8593 static bool flags_managed(u32 adv_flags)
8594 {
8595 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8596 MGMT_ADV_FLAG_LIMITED_DISCOV |
8597 MGMT_ADV_FLAG_MANAGED_FLAGS);
8598 }
8599
8600 static bool tx_power_managed(u32 adv_flags)
8601 {
8602 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8603 }
8604
8605 static bool name_managed(u32 adv_flags)
8606 {
8607 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8608 }
8609
8610 static bool appearance_managed(u32 adv_flags)
8611 {
8612 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8613 }
8614
8615 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8616 u8 len, bool is_adv_data)
8617 {
8618 int i, cur_len;
8619 u8 max_len;
8620
8621 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8622
8623 if (len > max_len)
8624 return false;
8625
8626 /* Make sure that the data is correctly formatted. */
8627 for (i = 0; i < len; i += (cur_len + 1)) {
8628 cur_len = data[i];
8629
8630 if (!cur_len)
8631 continue;
8632
8633 if (data[i + 1] == EIR_FLAGS &&
8634 (!is_adv_data || flags_managed(adv_flags)))
8635 return false;
8636
8637 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8638 return false;
8639
8640 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8641 return false;
8642
8643 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8644 return false;
8645
8646 if (data[i + 1] == EIR_APPEARANCE &&
8647 appearance_managed(adv_flags))
8648 return false;
8649
8650 /* If the current field length would exceed the total data
8651 * length, then it's invalid.
8652 */
8653 if (i + cur_len >= len)
8654 return false;
8655 }
8656
8657 return true;
8658 }
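
/* Example of the TLV walk above: data is a sequence of
 * [len][type][len - 1 payload bytes] fields, e.g.
 *
 *	0x03, 0x03, 0x12, 0x18,		len 3, 16-bit Service UUIDs
 *	0x02, 0x0a, 0x00		len 2, EIR_TX_POWER
 *
 * The second field is rejected when MGMT_ADV_FLAG_TX_POWER is set,
 * since the kernel then owns the TX power field, and any field whose
 * length runs past the end of the buffer (i + cur_len >= len) is
 * rejected as malformed.
 */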
8659
8660 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8661 {
8662 u32 supported_flags, phy_flags;
8663
8664 /* The current implementation only supports a subset of the specified
8665 * flags. We also need to check that the sec flags are mutually exclusive.
8666 */
8667 supported_flags = get_supported_adv_flags(hdev);
8668 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8669 if (adv_flags & ~supported_flags ||
8670 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8671 return false;
8672
8673 return true;
8674 }
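
/* The PHY check above is a power-of-two test: phy_flags & -phy_flags
 * isolates the lowest set bit, so the XOR is non-zero exactly when
 * more than one MGMT_ADV_FLAG_SEC_* bit is set. For example,
 * SEC_1M | SEC_2M XORs down to SEC_2M and is rejected, while any
 * single SEC_* bit XORs to zero and passes.
 */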
8675
8676 static bool adv_busy(struct hci_dev *hdev)
8677 {
8678 return pending_find(MGMT_OP_SET_LE, hdev);
8679 }
8680
8681 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8682 int err)
8683 {
8684 struct adv_info *adv, *n;
8685
8686 bt_dev_dbg(hdev, "err %d", err);
8687
8688 hci_dev_lock(hdev);
8689
8690 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8691 u8 instance;
8692
8693 if (!adv->pending)
8694 continue;
8695
8696 if (!err) {
8697 adv->pending = false;
8698 continue;
8699 }
8700
8701 instance = adv->instance;
8702
8703 if (hdev->cur_adv_instance == instance)
8704 cancel_adv_timeout(hdev);
8705
8706 hci_remove_adv_instance(hdev, instance);
8707 mgmt_advertising_removed(sk, hdev, instance);
8708 }
8709
8710 hci_dev_unlock(hdev);
8711 }
8712
8713 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8714 {
8715 struct mgmt_pending_cmd *cmd = data;
8716 struct mgmt_cp_add_advertising *cp = cmd->param;
8717 struct mgmt_rp_add_advertising rp;
8718
8719 memset(&rp, 0, sizeof(rp));
8720
8721 rp.instance = cp->instance;
8722
8723 if (err)
8724 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
8725 mgmt_status(err));
8726 else
8727 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
8728 mgmt_status(err), &rp, sizeof(rp));
8729
8730 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8731
8732 mgmt_pending_free(cmd);
8733 }
8734
8735 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8736 {
8737 struct mgmt_pending_cmd *cmd = data;
8738 struct mgmt_cp_add_advertising *cp = cmd->param;
8739
8740 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8741 }
8742
8743 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8744 void *data, u16 data_len)
8745 {
8746 struct mgmt_cp_add_advertising *cp = data;
8747 struct mgmt_rp_add_advertising rp;
8748 u32 flags;
8749 u8 status;
8750 u16 timeout, duration;
8751 unsigned int prev_instance_cnt;
8752 u8 schedule_instance = 0;
8753 struct adv_info *adv, *next_instance;
8754 int err;
8755 struct mgmt_pending_cmd *cmd;
8756
8757 bt_dev_dbg(hdev, "sock %p", sk);
8758
8759 status = mgmt_le_support(hdev);
8760 if (status)
8761 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8762 status);
8763
8764 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8765 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8766 MGMT_STATUS_INVALID_PARAMS);
8767
8768 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8769 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8770 MGMT_STATUS_INVALID_PARAMS);
8771
8772 flags = __le32_to_cpu(cp->flags);
8773 timeout = __le16_to_cpu(cp->timeout);
8774 duration = __le16_to_cpu(cp->duration);
8775
8776 if (!requested_adv_flags_are_valid(hdev, flags))
8777 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8778 MGMT_STATUS_INVALID_PARAMS);
8779
8780 hci_dev_lock(hdev);
8781
8782 if (timeout && !hdev_is_powered(hdev)) {
8783 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8784 MGMT_STATUS_REJECTED);
8785 goto unlock;
8786 }
8787
8788 if (adv_busy(hdev)) {
8789 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8790 MGMT_STATUS_BUSY);
8791 goto unlock;
8792 }
8793
8794 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8795 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8796 cp->scan_rsp_len, false)) {
8797 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8798 MGMT_STATUS_INVALID_PARAMS);
8799 goto unlock;
8800 }
8801
8802 prev_instance_cnt = hdev->adv_instance_cnt;
8803
8804 adv = hci_add_adv_instance(hdev, cp->instance, flags,
8805 cp->adv_data_len, cp->data,
8806 cp->scan_rsp_len,
8807 cp->data + cp->adv_data_len,
8808 timeout, duration,
8809 HCI_ADV_TX_POWER_NO_PREFERENCE,
8810 hdev->le_adv_min_interval,
8811 hdev->le_adv_max_interval, 0);
8812 if (IS_ERR(adv)) {
8813 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8814 MGMT_STATUS_FAILED);
8815 goto unlock;
8816 }
8817
8818 /* Only trigger an advertising added event if a new instance was
8819 * actually added.
8820 */
8821 if (hdev->adv_instance_cnt > prev_instance_cnt)
8822 mgmt_advertising_added(sk, hdev, cp->instance);
8823
8824 if (hdev->cur_adv_instance == cp->instance) {
8825 /* If the currently advertised instance is being changed then
8826 * cancel the current advertising and schedule the next
8827 * instance. If there is only one instance then the overridden
8828 * advertising data will be visible right away.
8829 */
8830 cancel_adv_timeout(hdev);
8831
8832 next_instance = hci_get_next_instance(hdev, cp->instance);
8833 if (next_instance)
8834 schedule_instance = next_instance->instance;
8835 } else if (!hdev->adv_instance_timeout) {
8836 /* Immediately advertise the new instance if no other
8837 * instance is currently being advertised.
8838 */
8839 schedule_instance = cp->instance;
8840 }
8841
8842 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8843 * there is no instance to be advertised then we have no HCI
8844 * communication to make. Simply return.
8845 */
8846 if (!hdev_is_powered(hdev) ||
8847 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8848 !schedule_instance) {
8849 rp.instance = cp->instance;
8850 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8851 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8852 goto unlock;
8853 }
8854
8855 /* We're good to go, update advertising data, parameters, and start
8856 * advertising.
8857 */
8858 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8859 data_len);
8860 if (!cmd) {
8861 err = -ENOMEM;
8862 goto unlock;
8863 }
8864
8865 cp->instance = schedule_instance;
8866
8867 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8868 add_advertising_complete);
8869 if (err < 0)
8870 mgmt_pending_free(cmd);
8871
8872 unlock:
8873 hci_dev_unlock(hdev);
8874
8875 return err;
8876 }
8877
8878 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8879 int err)
8880 {
8881 struct mgmt_pending_cmd *cmd = data;
8882 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8883 struct mgmt_rp_add_ext_adv_params rp;
8884 struct adv_info *adv;
8885 u32 flags;
8886
8887 BT_DBG("%s", hdev->name);
8888
8889 hci_dev_lock(hdev);
8890
8891 adv = hci_find_adv_instance(hdev, cp->instance);
8892 if (!adv)
8893 goto unlock;
8894
8895 rp.instance = cp->instance;
8896 rp.tx_power = adv->tx_power;
8897
8898 /* While we're at it, inform userspace of the available space for this
8899 * advertisement, given the flags that will be used.
8900 */
8901 flags = __le32_to_cpu(cp->flags);
8902 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8903 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8904
8905 if (err) {
8906 /* If this advertisement was previously advertising and we
8907 * failed to update it, we signal that it has been removed and
8908 * delete its structure.
8909 */
8910 if (!adv->pending)
8911 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8912
8913 hci_remove_adv_instance(hdev, cp->instance);
8914
8915 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
8916 mgmt_status(err));
8917 } else {
8918 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
8919 mgmt_status(err), &rp, sizeof(rp));
8920 }
8921
8922 unlock:
8923 mgmt_pending_free(cmd);
8924
8925 hci_dev_unlock(hdev);
8926 }
8927
8928 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8929 {
8930 struct mgmt_pending_cmd *cmd = data;
8931 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8932
8933 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8934 }
8935
8936 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8937 void *data, u16 data_len)
8938 {
8939 struct mgmt_cp_add_ext_adv_params *cp = data;
8940 struct mgmt_rp_add_ext_adv_params rp;
8941 struct mgmt_pending_cmd *cmd = NULL;
8942 struct adv_info *adv;
8943 u32 flags, min_interval, max_interval;
8944 u16 timeout, duration;
8945 u8 status;
8946 s8 tx_power;
8947 int err;
8948
8949 BT_DBG("%s", hdev->name);
8950
8951 status = mgmt_le_support(hdev);
8952 if (status)
8953 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8954 status);
8955
8956 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8957 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8958 MGMT_STATUS_INVALID_PARAMS);
8959
8960 /* The purpose of breaking add_advertising into two separate MGMT calls
8961 * for params and data is to allow more parameters to be added to this
8962 * structure in the future. For this reason, we only verify that at least
8963 * the bare minimum structure known when the interface was defined is
8964 * present. Any extra parameters we don't know about are ignored.
8965 */
8966 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8967 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8968 MGMT_STATUS_INVALID_PARAMS);
8969
8970 flags = __le32_to_cpu(cp->flags);
8971
8972 if (!requested_adv_flags_are_valid(hdev, flags))
8973 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8974 MGMT_STATUS_INVALID_PARAMS);
8975
8976 hci_dev_lock(hdev);
8977
8978 /* In the new interface, we require that we are powered to register */
8979 if (!hdev_is_powered(hdev)) {
8980 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8981 MGMT_STATUS_REJECTED);
8982 goto unlock;
8983 }
8984
8985 if (adv_busy(hdev)) {
8986 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8987 MGMT_STATUS_BUSY);
8988 goto unlock;
8989 }
8990
8991 /* Parse defined parameters from request, use defaults otherwise */
8992 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8993 __le16_to_cpu(cp->timeout) : 0;
8994
8995 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8996 __le16_to_cpu(cp->duration) :
8997 hdev->def_multi_adv_rotation_duration;
8998
8999 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
9000 __le32_to_cpu(cp->min_interval) :
9001 hdev->le_adv_min_interval;
9002
9003 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
9004 __le32_to_cpu(cp->max_interval) :
9005 hdev->le_adv_max_interval;
9006
9007 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
9008 cp->tx_power :
9009 HCI_ADV_TX_POWER_NO_PREFERENCE;
9010
9011 /* Create advertising instance with no advertising or response data */
9012 adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
9013 timeout, duration, tx_power, min_interval,
9014 max_interval, 0);
9015
9016 if (IS_ERR(adv)) {
9017 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9018 MGMT_STATUS_FAILED);
9019 goto unlock;
9020 }
9021
9022 /* Submit request for advertising params if ext adv available */
9023 if (ext_adv_capable(hdev)) {
9024 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
9025 data, data_len);
9026 if (!cmd) {
9027 err = -ENOMEM;
9028 hci_remove_adv_instance(hdev, cp->instance);
9029 goto unlock;
9030 }
9031
9032 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
9033 add_ext_adv_params_complete);
9034 if (err < 0)
9035 mgmt_pending_free(cmd);
9036 } else {
9037 rp.instance = cp->instance;
9038 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
9039 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9040 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9041 err = mgmt_cmd_complete(sk, hdev->id,
9042 MGMT_OP_ADD_EXT_ADV_PARAMS,
9043 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9044 }
9045
9046 unlock:
9047 hci_dev_unlock(hdev);
9048
9049 return err;
9050 }
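
/* Usage sketch of the split interface: userspace first issues Add
 * Extended Advertising Parameters (this handler) to create the
 * instance and learn rp.tx_power plus the maximum data lengths for
 * the chosen flags, then issues Add Extended Advertising Data (below)
 * to attach the advertising/scan response TLVs and start the
 * instance.
 */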
9051
9052 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
9053 {
9054 struct mgmt_pending_cmd *cmd = data;
9055 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
9056 struct mgmt_rp_add_advertising rp;
9057
9058 add_adv_complete(hdev, cmd->sk, cp->instance, err);
9059
9060 memset(&rp, 0, sizeof(rp));
9061
9062 rp.instance = cp->instance;
9063
9064 if (err)
9065 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
9066 mgmt_status(err));
9067 else
9068 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
9069 mgmt_status(err), &rp, sizeof(rp));
9070
9071 mgmt_pending_free(cmd);
9072 }
9073
9074 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
9075 {
9076 struct mgmt_pending_cmd *cmd = data;
9077 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
9078 int err;
9079
9080 if (ext_adv_capable(hdev)) {
9081 err = hci_update_adv_data_sync(hdev, cp->instance);
9082 if (err)
9083 return err;
9084
9085 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
9086 if (err)
9087 return err;
9088
9089 return hci_enable_ext_advertising_sync(hdev, cp->instance);
9090 }
9091
9092 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
9093 }
9094
9095 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
9096 u16 data_len)
9097 {
9098 struct mgmt_cp_add_ext_adv_data *cp = data;
9099 struct mgmt_rp_add_ext_adv_data rp;
9100 u8 schedule_instance = 0;
9101 struct adv_info *next_instance;
9102 struct adv_info *adv_instance;
9103 int err = 0;
9104 struct mgmt_pending_cmd *cmd;
9105
9106 BT_DBG("%s", hdev->name);
9107
9108 hci_dev_lock(hdev);
9109
9110 adv_instance = hci_find_adv_instance(hdev, cp->instance);
9111
9112 if (!adv_instance) {
9113 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9114 MGMT_STATUS_INVALID_PARAMS);
9115 goto unlock;
9116 }
9117
9118 /* In the new interface, we require that we are powered to register */
9119 if (!hdev_is_powered(hdev)) {
9120 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9121 MGMT_STATUS_REJECTED);
9122 goto clear_new_instance;
9123 }
9124
9125 if (adv_busy(hdev)) {
9126 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9127 MGMT_STATUS_BUSY);
9128 goto clear_new_instance;
9129 }
9130
9131 /* Validate new data */
9132 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
9133 cp->adv_data_len, true) ||
9134 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
9135 cp->adv_data_len, cp->scan_rsp_len, false)) {
9136 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9137 MGMT_STATUS_INVALID_PARAMS);
9138 goto clear_new_instance;
9139 }
9140
9141 /* Set the data in the advertising instance */
9142 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
9143 cp->data, cp->scan_rsp_len,
9144 cp->data + cp->adv_data_len);
9145
9146 /* If using software rotation, determine next instance to use */
9147 if (hdev->cur_adv_instance == cp->instance) {
9148 /* If the currently advertised instance is being changed
9149 * then cancel the current advertising and schedule the
9150 * next instance. If there is only one instance then the
9151 * overridden advertising data will be visible right
9152 * away.
9153 */
9154 cancel_adv_timeout(hdev);
9155
9156 next_instance = hci_get_next_instance(hdev, cp->instance);
9157 if (next_instance)
9158 schedule_instance = next_instance->instance;
9159 } else if (!hdev->adv_instance_timeout) {
9160 /* Immediately advertise the new instance if no other
9161 * instance is currently being advertised.
9162 */
9163 schedule_instance = cp->instance;
9164 }
9165
9166 /* If the HCI_ADVERTISING flag is set or there is no instance to
9167 * be advertised then we have no HCI communication to make.
9168 * Simply return.
9169 */
9170 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
9171 if (adv_instance->pending) {
9172 mgmt_advertising_added(sk, hdev, cp->instance);
9173 adv_instance->pending = false;
9174 }
9175 rp.instance = cp->instance;
9176 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9177 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9178 goto unlock;
9179 }
9180
9181 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9182 data_len);
9183 if (!cmd) {
9184 err = -ENOMEM;
9185 goto clear_new_instance;
9186 }
9187
9188 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
9189 add_ext_adv_data_complete);
9190 if (err < 0) {
9191 mgmt_pending_free(cmd);
9192 goto clear_new_instance;
9193 }
9194
9195 /* We were successful in updating the data, so trigger the
9196 * advertising_added event if this instance wasn't previously
9197 * advertising. If a failure occurs in the requests we initiated, the
9198 * instance is removed again in add_ext_adv_data_complete().
9199 */
9200 if (adv_instance->pending)
9201 mgmt_advertising_added(sk, hdev, cp->instance);
9202
9203 goto unlock;
9204
9205 clear_new_instance:
9206 hci_remove_adv_instance(hdev, cp->instance);
9207
9208 unlock:
9209 hci_dev_unlock(hdev);
9210
9211 return err;
9212 }
9213
9214 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9215 int err)
9216 {
9217 struct mgmt_pending_cmd *cmd = data;
9218 struct mgmt_cp_remove_advertising *cp = cmd->param;
9219 struct mgmt_rp_remove_advertising rp;
9220
9221 bt_dev_dbg(hdev, "err %d", err);
9222
9223 memset(&rp, 0, sizeof(rp));
9224 rp.instance = cp->instance;
9225
9226 if (err)
9227 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
9228 mgmt_status(err));
9229 else
9230 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
9231 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9232
9233 mgmt_pending_free(cmd);
9234 }
9235
9236 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9237 {
9238 struct mgmt_pending_cmd *cmd = data;
9239 struct mgmt_cp_remove_advertising *cp = cmd->param;
9240 int err;
9241
9242 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9243 if (err)
9244 return err;
9245
9246 if (list_empty(&hdev->adv_instances))
9247 err = hci_disable_advertising_sync(hdev);
9248
9249 return err;
9250 }
9251
9252 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9253 void *data, u16 data_len)
9254 {
9255 struct mgmt_cp_remove_advertising *cp = data;
9256 struct mgmt_pending_cmd *cmd;
9257 int err;
9258
9259 bt_dev_dbg(hdev, "sock %p", sk);
9260
9261 hci_dev_lock(hdev);
9262
9263 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9264 err = mgmt_cmd_status(sk, hdev->id,
9265 MGMT_OP_REMOVE_ADVERTISING,
9266 MGMT_STATUS_INVALID_PARAMS);
9267 goto unlock;
9268 }
9269
9270 if (pending_find(MGMT_OP_SET_LE, hdev)) {
9271 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9272 MGMT_STATUS_BUSY);
9273 goto unlock;
9274 }
9275
9276 if (list_empty(&hdev->adv_instances)) {
9277 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9278 MGMT_STATUS_INVALID_PARAMS);
9279 goto unlock;
9280 }
9281
9282 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9283 data_len);
9284 if (!cmd) {
9285 err = -ENOMEM;
9286 goto unlock;
9287 }
9288
9289 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
9290 remove_advertising_complete);
9291 if (err < 0)
9292 mgmt_pending_free(cmd);
9293
9294 unlock:
9295 hci_dev_unlock(hdev);
9296
9297 return err;
9298 }
9299
9300 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9301 void *data, u16 data_len)
9302 {
9303 struct mgmt_cp_get_adv_size_info *cp = data;
9304 struct mgmt_rp_get_adv_size_info rp;
9305 u32 flags, supported_flags;
9306
9307 bt_dev_dbg(hdev, "sock %p", sk);
9308
9309 if (!lmp_le_capable(hdev))
9310 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9311 MGMT_STATUS_REJECTED);
9312
9313 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9314 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9315 MGMT_STATUS_INVALID_PARAMS);
9316
9317 flags = __le32_to_cpu(cp->flags);
9318
9319 /* The current implementation only supports a subset of the specified
9320 * flags.
9321 */
9322 supported_flags = get_supported_adv_flags(hdev);
9323 if (flags & ~supported_flags)
9324 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9325 MGMT_STATUS_INVALID_PARAMS);
9326
9327 rp.instance = cp->instance;
9328 rp.flags = cp->flags;
9329 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9330 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9331
9332 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9333 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9334 }
9335
9336 static const struct hci_mgmt_handler mgmt_handlers[] = {
9337 { NULL }, /* 0x0000 (no command) */
9338 { read_version, MGMT_READ_VERSION_SIZE,
9339 HCI_MGMT_NO_HDEV |
9340 HCI_MGMT_UNTRUSTED },
	{ read_commands, MGMT_READ_COMMANDS_SIZE,
		HCI_MGMT_NO_HDEV |
		HCI_MGMT_UNTRUSTED },
	{ read_index_list, MGMT_READ_INDEX_LIST_SIZE,
		HCI_MGMT_NO_HDEV |
		HCI_MGMT_UNTRUSTED },
	{ read_controller_info, MGMT_READ_INFO_SIZE,
		HCI_MGMT_UNTRUSTED },
	{ set_powered, MGMT_SETTING_SIZE },
	{ set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable, MGMT_SETTING_SIZE },
	{ set_fast_connectable, MGMT_SETTING_SIZE },
	{ set_bondable, MGMT_SETTING_SIZE },
	{ set_link_security, MGMT_SETTING_SIZE },
	{ set_ssp, MGMT_SETTING_SIZE },
	{ set_hs, MGMT_SETTING_SIZE },
	{ set_le, MGMT_SETTING_SIZE },
	{ set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid, MGMT_ADD_UUID_SIZE },
	{ remove_uuid, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
		HCI_MGMT_VAR_LEN },
	{ load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
		HCI_MGMT_VAR_LEN },
	{ disconnect, MGMT_DISCONNECT_SIZE },
	{ get_connections, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
		HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name, MGMT_CONFIRM_NAME_SIZE },
	{ block_device, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising, MGMT_SETTING_SIZE },
	{ set_bredr, MGMT_SETTING_SIZE },
	{ set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn, MGMT_SETTING_SIZE },
	{ set_debug_keys, MGMT_SETTING_SIZE },
	{ set_privacy, MGMT_SET_PRIVACY_SIZE },
	{ load_irks, MGMT_LOAD_IRKS_SIZE,
		HCI_MGMT_VAR_LEN },
	{ get_conn_info, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device, MGMT_ADD_DEVICE_SIZE },
	{ remove_device, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
		HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
		HCI_MGMT_NO_HDEV |
		HCI_MGMT_UNTRUSTED },
	{ read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
		HCI_MGMT_UNCONFIGURED |
		HCI_MGMT_UNTRUSTED },
	{ set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
		HCI_MGMT_UNCONFIGURED },
	{ set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
		HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
		HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
		HCI_MGMT_NO_HDEV |
		HCI_MGMT_UNTRUSTED },
	{ read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising, MGMT_ADD_ADVERTISING_SIZE,
		HCI_MGMT_VAR_LEN },
	{ remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info, MGMT_READ_EXT_INFO_SIZE,
		HCI_MGMT_UNTRUSTED },
	{ set_appearance, MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
		HCI_MGMT_VAR_LEN },
	{ set_wideband_speech, MGMT_SETTING_SIZE },
	{ read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
		HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
		HCI_MGMT_UNTRUSTED |
		HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
		HCI_MGMT_VAR_LEN |
		HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
		HCI_MGMT_UNTRUSTED },
	{ set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
		HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
		HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
		HCI_MGMT_VAR_LEN },
	{ get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor, MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
		HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
		HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
		HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
		MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
		HCI_MGMT_VAR_LEN },
	{ set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
		HCI_MGMT_VAR_LEN },
	{ mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send, MGMT_MESH_SEND_SIZE,
		HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
	{ mgmt_hci_cmd_sync, MGMT_HCI_CMD_SYNC_SIZE, HCI_MGMT_VAR_LEN },
};

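/* Announce a newly visible controller index to user space. Sends the
 * legacy INDEX_ADDED event (or UNCONF_INDEX_ADDED for an unconfigured
 * controller) plus the extended EXT_INDEX_ADDED event, which also
 * carries the controller type and bus.
 */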
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
		return;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}

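/* Announce a disappearing controller index. Fails all pending commands
 * with INVALID_INDEX, sends the matching index-removed events and, if
 * the controller was mgmt-managed, cancels its remaining delayed work.
 */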
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };

	if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
		return;

	mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
	cancel_delayed_work_sync(&hdev->mesh_send_done);
}

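/* Completion handler for a power-on request. On success, restarts the
 * stored LE actions and re-enables passive scanning before answering
 * any pending SET_POWERED commands and emitting New Settings.
 */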
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}

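/* Power-off counterpart of mgmt_power_on(). Fails pending commands with
 * INVALID_INDEX when the power off is due to unregistration and
 * NOT_POWERED otherwise, and announces a zeroed class of device. The
 * leading underscores follow the usual kernel convention that the
 * caller is responsible for the required locking.
 */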
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
			     &match);

	/* If the power off is because of hdev unregistration use the
	 * appropriate INVALID_INDEX status. Otherwise use NOT_POWERED.
	 * We cover both scenarios here since later in mgmt_index_removed()
	 * any hci_conn callbacks will have already been triggered,
	 * potentially causing misleading DISCONNECTED status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
	else
		match.mgmt_status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct mgmt_pending_cmd *cmd;
	u8 status;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

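/* Map an SMP long term key to the key type exposed over the mgmt
 * interface:
 *
 *   SMP_LTK / SMP_LTK_RESPONDER -> MGMT_LTK_(UN)AUTHENTICATED
 *   SMP_LTK_P256                -> MGMT_LTK_P256_(UN)AUTH
 *   SMP_LTK_P256_DEBUG          -> MGMT_LTK_P256_DEBUG
 *
 * Unknown types fall back to MGMT_LTK_UNAUTHENTICATED.
 */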
static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_RESPONDER:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}

	return MGMT_LTK_UNAUTHENTICATED;
}

void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't need their
	 * long term keys stored. Their addresses will change the next
	 * time around.
	 *
	 * Only make sure the long term key is stored when the remote
	 * device provides an identity address. If the remote identity
	 * is known, the long term keys are internally mapped to the
	 * identity address. So allow static random and public addresses
	 * here (static random addresses have the two most significant
	 * bits set to 0b11, hence the 0xc0 mask below).
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't need their
	 * signature resolving keys stored. Their addresses will change
	 * the next time around.
	 *
	 * Only make sure the signature resolving key is stored when the
	 * remote device provides an identity address. So allow static
	 * random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

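/* Send MGMT_EV_DEVICE_CONNECTED for a connection, at most once per
 * hci_conn (guarded by the HCI_CONN_MGMT_CONNECTED flag). The event
 * carries either the LE advertising data or, for BR/EDR, the remote
 * name and class of device encoded as EIR fields.
 */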
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* Allocate a buffer sized for either the LE advertising data or
	 * the BR/EDR name and class of device.
	 */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) +
				     (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE,
						    name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class,
						    sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}

static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
}

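/* Return true if the controller is in the middle of powering down,
 * either because HCI_POWERING_DOWN is set or because a SET_POWERED
 * command turning the adapter off is still pending. This lets, for
 * example, disconnections caused by a power off be reported with an
 * appropriate reason.
 */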
bool mgmt_powering_down(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
		return true;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK &&
	    link_type != LE_LINK &&
	    link_type != BIS_LINK)
		return;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, true,
			     unpair_device_rsp, hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

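/* If a DEVICE_CONNECTED event was already sent for this connection,
 * user space expects a DEVICE_DISCONNECTED event rather than a
 * CONNECT_FAILED one, so the failure is reported as a disconnection in
 * that case.
 */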
void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
					 conn->dst_type, status, true);
		return;
	}

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

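/* Shared completion helper for the four user confirm/passkey reply
 * wrappers below.
 */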
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, false, sk_lookup,
			     &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, false, sk_lookup,
			     &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, false, sk_lookup,
			     &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS,
				   NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this name change was part of a power on/off sequence
		 * (HCI_POWERING_DOWN set or a SET_POWERED command still
		 * pending) don't send any mgmt signals.
		 */
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
			return;

		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}

static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}

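/* Walk the EIR/advertising data and check whether any 16-bit, 32-bit or
 * 128-bit Service UUID field matches one of the UUIDs in the filter
 * list. 16- and 32-bit UUIDs are expanded to 128 bits using the
 * Bluetooth base UUID before comparison (e.g. 0x110b becomes
 * 0000110b-0000-1000-8000-00805f9b34fb).
 */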
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}

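/* Apply the Start Service Discovery filters (RSSI threshold and UUID
 * list) to a single discovery result.
 */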
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If an RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * an RSSI smaller than the RSSI threshold will be dropped. If the
	 * quirk is set, let it through for further processing, as we might
	 * need to restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER)) {
		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}

void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.monitor_handle = cpu_to_le16(handle);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}

static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}

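/* Decide whether an advertising report is delivered as DEVICE_FOUND,
 * as ADV_MONITOR_DEVICE_FOUND, or both, and consume the skb either way.
 */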
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}

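/* Mesh-mode counterpart of mgmt_device_found(): forward an advertising
 * report as MGMT_EV_MESH_DEVICE_FOUND, but only if it contains at least
 * one of the AD types configured via Set Mesh Receiver (an empty list
 * accepts everything).
 */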
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}

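/* Main entry point for inquiry results and advertising reports. Applies
 * the discovery, service-discovery and limited-discovery filters before
 * handing the event off to the advertisement-monitor logic, which
 * decides how it is delivered to user space.
 */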
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. For LE,
	 * the one exception is pend_le_reports > 0, in which case we're
	 * doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);

			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}

void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) +
			     (name ? eir_precalc_len(name_len) : 0));
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name,
					    name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}

void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	} else {
		memset(&ev.addr, 0, sizeof(ev.addr));
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}

static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};

int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}

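/* Called when a management socket is being closed: complete any mesh
 * transmissions still queued by that socket on any controller.
 */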
void mgmt_cleanup(struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, true);
		} while (mesh_tx);
	}

	read_unlock(&hci_dev_list_lock);
}
