1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <linux/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "smp.h"
37 #include "mgmt_util.h"
38 #include "mgmt_config.h"
39 #include "msft.h"
40 #include "eir.h"
41 #include "aosp.h"
42
/* Version and revision of the Management interface, reported to
 * userspace via MGMT_OP_READ_VERSION (see mgmt_fill_version_info()).
 */
#define MGMT_VERSION	1
#define MGMT_REVISION	23
45
/* Full set of management opcodes available to trusted sockets.
 * Reported by MGMT_OP_READ_COMMANDS (see read_commands()).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
	MGMT_OP_HCI_CMD_SYNC,
};
137
/* Full set of management events that may be delivered to trusted
 * sockets. Reported by MGMT_OP_READ_COMMANDS (see read_commands()).
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
184
/* Read-only subset of opcodes available to untrusted sockets
 * (those without HCI_SOCK_TRUSTED set; see read_commands()).
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
197
/* Subset of events that untrusted sockets may receive
 * (see read_commands()).
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
212
/* Two seconds, expressed in jiffies (usage not visible in this chunk). */
#define CACHE_TIMEOUT	secs_to_jiffies(2)

/* A 16-byte all-zero key value. */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
217
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code, so entry order must match
 * the HCI specification's status code numbering. mgmt_status() returns
 * MGMT_STATUS_FAILED for any code beyond the end of this table.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
285
mgmt_errno_status(int err)286 static u8 mgmt_errno_status(int err)
287 {
288 switch (err) {
289 case 0:
290 return MGMT_STATUS_SUCCESS;
291 case -EPERM:
292 return MGMT_STATUS_REJECTED;
293 case -EINVAL:
294 return MGMT_STATUS_INVALID_PARAMS;
295 case -EOPNOTSUPP:
296 return MGMT_STATUS_NOT_SUPPORTED;
297 case -EBUSY:
298 return MGMT_STATUS_BUSY;
299 case -ETIMEDOUT:
300 return MGMT_STATUS_AUTH_FAILED;
301 case -ENOMEM:
302 return MGMT_STATUS_NO_RESOURCES;
303 case -EISCONN:
304 return MGMT_STATUS_ALREADY_CONNECTED;
305 case -ENOTCONN:
306 return MGMT_STATUS_DISCONNECTED;
307 }
308
309 return MGMT_STATUS_FAILED;
310 }
311
mgmt_status(int err)312 static u8 mgmt_status(int err)
313 {
314 if (err < 0)
315 return mgmt_errno_status(err);
316
317 if (err < ARRAY_SIZE(mgmt_status_table))
318 return mgmt_status_table[err];
319
320 return MGMT_STATUS_FAILED;
321 }
322
/* Send an index-related event on the control channel, filtered by the
 * given socket flag; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
329
/* Send an event on the control channel, limited to sockets with the
 * given flag set, optionally skipping @skip_sk (typically the sender).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
336
/* Send an event on the control channel to trusted sockets only,
 * optionally skipping @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
343
/* Send a pre-built event skb on the control channel to trusted sockets,
 * optionally skipping @skip_sk.
 */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
349
/* Map an MGMT LE address type to the HCI-level LE address type.
 * Anything other than BDADDR_LE_PUBLIC is treated as a random address.
 */
static u8 le_addr_type(u8 mgmt_addr_type)
{
	return mgmt_addr_type == BDADDR_LE_PUBLIC ? ADDR_LE_DEV_PUBLIC :
						    ADDR_LE_DEV_RANDOM;
}
357
/* Fill a struct mgmt_rp_read_version with the interface version and
 * little-endian revision. Non-static: also used outside this file.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
365
/* MGMT_OP_READ_VERSION handler: reply with the interface version and
 * revision. Takes no parameters and needs no controller index.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
378
/* MGMT_OP_READ_COMMANDS handler: reply with the list of supported
 * opcodes followed by the list of supported events. Trusted sockets get
 * the full tables, untrusted sockets only the read-only subsets.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 *cmds, *evts;
	u16 num_commands, num_events;
	__le16 *opcode;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pick the tables once so a single fill loop serves both cases. */
	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		cmds = mgmt_commands;
		num_commands = ARRAY_SIZE(mgmt_commands);
		evts = mgmt_events;
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		cmds = mgmt_untrusted_commands;
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		evts = mgmt_untrusted_events;
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	/* Opcodes first, events second, each stored unaligned LE16. */
	opcode = rp->opcodes;

	for (i = 0; i < num_commands; i++, opcode++)
		put_unaligned_le16(cmds[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(evts[i], opcode);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
430
/* MGMT_OP_READ_INDEX_LIST handler: reply with the ids of all configured
 * controllers.
 *
 * The whole walk runs under the read side of hci_dev_list_lock, hence
 * the GFP_ATOMIC allocation. The first pass only sizes the buffer from
 * the !HCI_UNCONFIGURED count; the second pass applies extra filters
 * (HCI_SETUP/HCI_CONFIG/HCI_USER_CHANNEL and raw-only devices), so the
 * buffer may end up larger than needed and rp_len is recomputed from
 * the final count before sending.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the reply size. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes that actually qualify. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
488
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: like read_index_list(), but
 * reports only controllers with HCI_UNCONFIGURED set.
 *
 * Runs under the read side of hci_dev_list_lock (hence GFP_ATOMIC); the
 * first pass sizes the buffer and the second pass applies the extra
 * filters, so rp_len is recomputed from the final count.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the reply size. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes that actually qualify. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
546
/* MGMT_OP_READ_EXT_INDEX_LIST handler: reply with one entry per
 * controller, each carrying type (0x00 configured, 0x01 unconfigured),
 * bus and index.
 *
 * Runs under the read side of hci_dev_list_lock (hence GFP_ATOMIC).
 * The allocation is sized for every device on the list; the fill pass
 * filters out SETUP/CONFIG/USER_CHANNEL and raw-only devices, so the
 * reply length is recomputed with struct_size() from the final count.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
612
is_configured(struct hci_dev * hdev)613 static bool is_configured(struct hci_dev *hdev)
614 {
615 if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) &&
616 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
617 return false;
618
619 if ((hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) ||
620 hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY)) &&
621 !bacmp(&hdev->public_addr, BDADDR_ANY))
622 return false;
623
624 return true;
625 }
626
/* Build the little-endian bitmask of configuration options that are
 * still outstanding (mirrors the checks in is_configured()).
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	/* External configuration required but not completed yet. */
	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* A valid public address is required but none has been set. */
	if ((hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) ||
	     hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
642
/* Broadcast MGMT_EV_NEW_CONFIG_OPTIONS with the currently missing
 * options to sockets listening for option events, skipping @skip.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
650
/* Complete @opcode for @sk with the missing-options bitmask as reply. */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
658
/* MGMT_OP_READ_CONFIG_INFO handler: reply with the manufacturer id,
 * the configuration options the controller supports and the ones that
 * are still missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Changing the public address is only supported when the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
686
/* Build the bitmask of PHYs the controller supports, derived from the
 * BR/EDR LMP feature bits and the LE feature bits (2M and Coded PHY).
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot is mandatory for BR/EDR. */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* EDR 3M support is nested under EDR 2M support; the
		 * 3/5-slot bits apply to both rates.
		 */
		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory for any LE controller. */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
738
/* Build the bitmask of PHYs currently selected. BR/EDR selection is
 * derived from hdev->pkt_type; note that the EDR packet-type bits are
 * checked inverted (a set HCI_2DHx/HCI_3DHx bit means that packet type
 * is excluded). LE selection comes from le_tx_def_phys/le_rx_def_phys.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot is always selected on BR/EDR. */
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
801
get_configurable_phys(struct hci_dev * hdev)802 static u32 get_configurable_phys(struct hci_dev *hdev)
803 {
804 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
805 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
806 }
807
get_supported_settings(struct hci_dev * hdev)808 static u32 get_supported_settings(struct hci_dev *hdev)
809 {
810 u32 settings = 0;
811
812 settings |= MGMT_SETTING_POWERED;
813 settings |= MGMT_SETTING_BONDABLE;
814 settings |= MGMT_SETTING_DEBUG_KEYS;
815 settings |= MGMT_SETTING_CONNECTABLE;
816 settings |= MGMT_SETTING_DISCOVERABLE;
817
818 if (lmp_bredr_capable(hdev)) {
819 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
820 settings |= MGMT_SETTING_FAST_CONNECTABLE;
821 settings |= MGMT_SETTING_BREDR;
822 settings |= MGMT_SETTING_LINK_SECURITY;
823
824 if (lmp_ssp_capable(hdev)) {
825 settings |= MGMT_SETTING_SSP;
826 }
827
828 if (lmp_sc_capable(hdev))
829 settings |= MGMT_SETTING_SECURE_CONN;
830
831 if (hci_test_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED))
832 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
833 }
834
835 if (lmp_le_capable(hdev)) {
836 settings |= MGMT_SETTING_LE;
837 settings |= MGMT_SETTING_SECURE_CONN;
838 settings |= MGMT_SETTING_PRIVACY;
839 settings |= MGMT_SETTING_STATIC_ADDRESS;
840 settings |= MGMT_SETTING_ADVERTISING;
841 }
842
843 if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) || hdev->set_bdaddr)
844 settings |= MGMT_SETTING_CONFIGURATION;
845
846 if (cis_central_capable(hdev))
847 settings |= MGMT_SETTING_CIS_CENTRAL;
848
849 if (cis_peripheral_capable(hdev))
850 settings |= MGMT_SETTING_CIS_PERIPHERAL;
851
852 if (ll_privacy_capable(hdev))
853 settings |= MGMT_SETTING_LL_PRIVACY;
854
855 settings |= MGMT_SETTING_PHY_CONFIGURATION;
856
857 return settings;
858 }
859
/* Build the bitmask of settings currently in effect by sampling the
 * device flags and related state.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_enabled(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_enabled(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_enabled(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_enabled(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	if (ll_privacy_enabled(hdev))
		settings |= MGMT_SETTING_LL_PRIVACY;

	return settings;
}
942
/* Look up a pending management command for @opcode on the control
 * channel of @hdev.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
947
mgmt_get_adv_discov_flags(struct hci_dev * hdev)948 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
949 {
950 struct mgmt_pending_cmd *cmd;
951
952 /* If there's a pending mgmt command the flags will not yet have
953 * their final values, so check for this first.
954 */
955 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
956 if (cmd) {
957 struct mgmt_mode *cp = cmd->param;
958 if (cp->val == 0x01)
959 return LE_AD_GENERAL;
960 else if (cp->val == 0x02)
961 return LE_AD_LIMITED;
962 } else {
963 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
964 return LE_AD_LIMITED;
965 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
966 return LE_AD_GENERAL;
967 }
968
969 return 0;
970 }
971
mgmt_get_connectable(struct hci_dev * hdev)972 bool mgmt_get_connectable(struct hci_dev *hdev)
973 {
974 struct mgmt_pending_cmd *cmd;
975
976 /* If there's a pending mgmt command the flag will not yet have
977 * it's final value, so check for this first.
978 */
979 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
980 if (cmd) {
981 struct mgmt_mode *cp = cmd->param;
982
983 return cp->val;
984 }
985
986 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
987 }
988
/* hci_cmd_sync callback: refresh the EIR data and class of device. */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
996
/* Delayed-work handler for hdev->service_cache: if the service cache
 * flag was still set, clear it and queue an EIR/class update.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	/* Only act once; bail out if the flag was already cleared. */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
1007
rpa_expired_sync(struct hci_dev * hdev,void * data)1008 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1009 {
1010 /* The generation of a new RPA and programming it into the
1011 * controller happens in the hci_req_enable_advertising()
1012 * function.
1013 */
1014 if (ext_adv_capable(hdev))
1015 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1016 else
1017 return hci_enable_advertising_sync(hdev);
1018 }
1019
/* Delayed-work handler for hdev->rpa_expired: mark the RPA as expired
 * and, if advertising is enabled, queue a restart so a new RPA is used.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing to refresh unless advertising is currently active. */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1034
1035 static int set_discoverable_sync(struct hci_dev *hdev, void *data);
1036
/* Delayed-work handler for hdev->discov_off: the discoverable timeout
 * expired, so clear the discoverable state, push the change to the
 * controller and notify mgmt listeners of the new settings.
 */
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	/* NULL data: set_discoverable_sync runs without a pending cmd */
	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}
1061
1062 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1063
/* Finish a mesh transmit: optionally emit MGMT_EV_MESH_PACKET_CMPLT to
 * user space, then release the mesh_tx entry. @silent suppresses the
 * event (used when the caller is tearing down without notification).
 */
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	/* Copy the handle out first: mgmt_mesh_remove() below frees
	 * mesh_tx, so it must not be dereferenced afterwards.
	 */
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}
1075
/* hci_cmd_sync callback: a mesh send window ended. Clear the sending
 * flag, stop advertising if no regular instances remain, and complete
 * the current mesh_tx entry (emitting its completion event).
 */
static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	if (list_empty(&hdev->adv_instances))
		hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}
1090
1091 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1092 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
/* Completion callback chained after mesh_send_done_sync: if another
 * mesh_tx is queued, start transmitting it; on queueing failure the
 * entry is completed (and freed) immediately.
 */
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	/* Note: the incoming err is intentionally overwritten; only the
	 * result of queueing the next send matters here.
	 */
	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}
1108
/* Delayed-work handler for hdev->mesh_send_done: when the mesh send
 * timer fires while a send is in progress, queue the teardown followed
 * by the next queued transmission (mesh_next).
 */
static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}
1119
/* One-time per-hdev mgmt initialization, performed on the first mgmt
 * command for this controller. Idempotent via the HCI_MGMT flag.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}
1141
/* Handler for MGMT_OP_READ_INFO: reply with the controller address,
 * version/manufacturer, supported and current settings, class of
 * device and names. Snapshot is taken under hdev->lock.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1171
/* Build the EIR blob for extended controller info: class of device
 * (if BR/EDR), appearance (if LE), and the complete/short local
 * names. Returns the number of bytes written to @eir. Caller must
 * provide a buffer large enough for all four fields.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1195
/* Handler for MGMT_OP_READ_EXT_INFO: like READ_INFO but with class,
 * appearance and names packed as a variable-length EIR blob. Also
 * switches this socket over to extended-info events only.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pass the array itself, not its address, matching the idiom
	 * used in ext_info_changed().
	 */
	memset(buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1235
/* Broadcast MGMT_EV_EXT_INFO_CHANGED (with the freshly built EIR blob)
 * to all sockets that opted in to extended-info events, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1251
/* Send a command-complete for @opcode whose payload is the current
 * settings bitmask (little-endian).
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1259
/* Notify mgmt listeners (except @sk) that advertising instance
 * @instance was added.
 */
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev = {
		.instance = instance,
	};

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}
1268
/* Notify mgmt listeners (except @sk) that advertising instance
 * @instance was removed.
 */
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev = {
		.instance = instance,
	};

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}
1278
cancel_adv_timeout(struct hci_dev * hdev)1279 static void cancel_adv_timeout(struct hci_dev *hdev)
1280 {
1281 if (hdev->adv_instance_timeout) {
1282 hdev->adv_instance_timeout = 0;
1283 cancel_delayed_work(&hdev->adv_instance_expire);
1284 }
1285 }
1286
/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	/* Re-sort every known connection parameter entry back onto the
	 * pending list that matches its auto-connect policy.
	 */
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			/* HCI_AUTO_CONN_DISABLED etc: stay off both lists */
			break;
		}
	}
}
1311
/* Broadcast MGMT_EV_NEW_SETTINGS (current settings bitmask) to all
 * sockets subscribed to setting events, except @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1319
/* Completion callback for MGMT_OP_SET_POWERED: respond to the caller
 * and, on a successful power-on, restore LE auto-connect actions and
 * notify listeners of the new settings.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_free(cmd);
}
1355
/* hci_cmd_sync callback for MGMT_OP_SET_POWERED: copy the requested
 * mode out under mgmt_pending_lock (the cmd may be freed concurrently
 * once the lock is dropped), then apply the power state.
 */
static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode cp;

	mutex_lock(&hdev->mgmt_pending_lock);

	/* Make sure cmd still outstanding. */
	if (!__mgmt_pending_listed(hdev, cmd)) {
		mutex_unlock(&hdev->mgmt_pending_lock);
		return -ECANCELED;
	}

	/* Snapshot the parameter; cmd must not be touched after unlock */
	memcpy(&cp, cmd->param, sizeof(cp));

	mutex_unlock(&hdev->mgmt_pending_lock);

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp.val);
}
1377
/* Handler for MGMT_OP_SET_POWERED: validate the request, reject it if
 * a power transition is already in flight, short-circuit if the state
 * is unchanged, otherwise queue the asynchronous power change.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Reject power-off while one is already in progress */
	if (!cp->val) {
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
					      MGMT_STATUS_BUSY);
			goto failed;
		}
	}

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just echo the settings back */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1436
/* Public wrapper: broadcast the current settings to all subscribers. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1441
/* Context passed to per-pending-command iterator callbacks. */
struct cmd_lookup {
	struct sock *sk;	/* first responder socket (ref held) */
	struct hci_dev *hdev;
	u8 mgmt_status;		/* status to report for each command */
};
1447
/* Iterator callback: answer @cmd with the current settings and record
 * (and hold a reference on) the first responder socket in the lookup
 * context so the caller can later skip it when broadcasting.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		/* Caller is responsible for the matching sock_put() */
		sock_hold(match->sk);
	}
}
1459
/* Iterator callback: fail @cmd with the status pointed to by @data. */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, *status);
}
1466
/* Iterator callback: complete @cmd using its own cmd_complete handler
 * when it has one, otherwise fall back to a plain status response.
 */
static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	/* dequeue cmd_sync entries using cmd as data as that is about to be
	 * removed/freed.
	 */
	hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);

	if (cmd->cmd_complete) {
		cmd->cmd_complete(cmd, match->mgmt_status);
		return;
	}

	/* cmd_status_rsp reads match->mgmt_status (first u8 of *match) */
	cmd_status_rsp(cmd, data);
}
1483
/* Default cmd_complete: echo the command's own parameters back. */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1489
/* cmd_complete variant for commands whose parameters begin with a
 * struct mgmt_addr_info: only that prefix is echoed back.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1495
mgmt_bredr_support(struct hci_dev * hdev)1496 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1497 {
1498 if (!lmp_bredr_capable(hdev))
1499 return MGMT_STATUS_NOT_SUPPORTED;
1500 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1501 return MGMT_STATUS_REJECTED;
1502 else
1503 return MGMT_STATUS_SUCCESS;
1504 }
1505
mgmt_le_support(struct hci_dev * hdev)1506 static u8 mgmt_le_support(struct hci_dev *hdev)
1507 {
1508 if (!lmp_le_capable(hdev))
1509 return MGMT_STATUS_NOT_SUPPORTED;
1510 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1511 return MGMT_STATUS_REJECTED;
1512 else
1513 return MGMT_STATUS_SUCCESS;
1514 }
1515
/* Completion callback for MGMT_OP_SET_DISCOVERABLE: on success arm the
 * discoverable timeout (if any) and notify listeners; on failure roll
 * back the limited-discoverable flag and report the error.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		/* Undo the eager flag set done in set_discoverable() */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* The arming of the timeout is deferred to here so it only
	 * starts once the controller actually became discoverable.
	 */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = secs_to_jiffies(hdev->discov_timeout);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
	hci_dev_unlock(hdev);
}
1549
/* hci_cmd_sync callback for MGMT_OP_SET_DISCOVERABLE: bail out if the
 * pending command was cancelled meanwhile, otherwise push the current
 * discoverable flags to the controller.
 */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	if (!mgmt_pending_listed(hdev, data))
		return -ECANCELED;

	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
1559
/* Handler for MGMT_OP_SET_DISCOVERABLE (0x00 off, 0x01 general,
 * 0x02 limited): validates the mode/timeout combination, handles the
 * powered-off and no-HCI-change fast paths, then eagerly updates the
 * discoverable flags and queues the controller update.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = secs_to_jiffies(hdev->discov_timeout);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1692
/* Completion callback for MGMT_OP_SET_CONNECTABLE: report the result
 * to the requester and broadcast the new settings on success.
 */
static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
1720
/* Apply a connectable change while the controller is powered off: only
 * the flags are touched, no HCI traffic is generated. Returns 0 or a
 * negative error from the response/notification path.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool notify = !!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE);
	int err;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off implies not discoverable either */
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (!notify)
		return 0;

	hci_update_scan(hdev);
	hci_update_passive_scan(hdev);

	return new_settings(hdev, sk);
}
1749
/* hci_cmd_sync callback for MGMT_OP_SET_CONNECTABLE: bail out if the
 * pending command was cancelled meanwhile, otherwise push the current
 * connectable state to the controller.
 */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	if (!mgmt_pending_listed(hdev, data))
		return -ECANCELED;

	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
1759
/* Handler for MGMT_OP_SET_CONNECTABLE: validate, handle the powered-off
 * flags-only path, then eagerly update the connectable/discoverable
 * flags and queue the controller update.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only the stored flags need updating */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Dropping connectable also cancels any running
		 * discoverable timeout and clears both discoverable
		 * variants.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1819
/* Handler for MGMT_OP_SET_BONDABLE: purely a flag change, applied
 * immediately; no HCI command is required.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1857
/* Handler for MGMT_OP_SET_LINK_SECURITY: toggles BR/EDR authentication.
 * When powered off only the flag is changed; when powered on the
 * HCI Write Auth Enable command is issued and completion is handled
 * via the legacy pending-command path.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Response is sent from the command-complete event handler */
	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1926
/* Completion callback for MGMT_OP_SET_SSP: on failure roll back the
 * SSP flag and report the error; on success settle the flag, answer
 * the requester and broadcast the settings if they changed.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;
	u8 enable;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	cp = cmd->param;
	enable = cp->val;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Roll back the eagerly-set flag and notify if it
		 * actually changed.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			new_settings(hdev, NULL);
		}

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	/* Responds to cmd->sk and records it in match (with a ref) */
	settings_rsp(cmd, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
1970
/* hci_cmd_sync callback for MGMT_OP_SET_SSP: snapshot the requested
 * mode under mgmt_pending_lock, then write the SSP mode to the
 * controller.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode cp;
	bool changed = false;
	int err;

	mutex_lock(&hdev->mgmt_pending_lock);

	/* Bail out if the command was cancelled meanwhile */
	if (!__mgmt_pending_listed(hdev, cmd)) {
		mutex_unlock(&hdev->mgmt_pending_lock);
		return -ECANCELED;
	}

	/* Snapshot the parameter; cmd must not be touched after unlock */
	memcpy(&cp, cmd->param, sizeof(cp));

	mutex_unlock(&hdev->mgmt_pending_lock);

	/* Set the flag eagerly so the sync write path sees SSP enabled */
	if (cp.val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp.val);

	/* NOTE(review): on success the eagerly-set flag is cleared again,
	 * presumably so set_ssp_complete() observes the 0 -> 1 transition
	 * and emits new_settings — confirm against set_ssp_complete().
	 */
	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1999
/* Handler for MGMT_OP_SET_SSP: validates support and the requested
 * mode, handles the powered-off flag-only path, otherwise queues the
 * SSP mode change.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just echo the settings back */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2074
/* Handler for MGMT_OP_SET_HS: High Speed (AMP) support has been
 * removed, so the command is always rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	bt_dev_dbg(hdev, "sock %p", sk);

	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
			       MGMT_STATUS_NOT_SUPPORTED);
}
2082
/* Completion callback for MGMT_OP_SET_LE: report the result to the
 * requester and broadcast the new settings on success.
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (err == -ECANCELED || !mgmt_pending_valid(hdev, data))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, status);
		goto done;
	}

	/* Responds to cmd->sk and records it in match (with a ref) */
	settings_rsp(cmd, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

done:
	mgmt_pending_free(cmd);
}
2109
/* hci_cmd_sync work for MGMT_OP_SET_LE: enable or disable LE host
 * support on the controller.  Returns -ECANCELED if the pending command
 * was already cancelled, otherwise the HCI command status.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode cp;
	u8 val;
	int err;

	/* Snapshot the request parameters while holding mgmt_pending_lock;
	 * the command may be freed concurrently once it is unlisted.
	 */
	mutex_lock(&hdev->mgmt_pending_lock);

	if (!__mgmt_pending_listed(hdev, cmd)) {
		mutex_unlock(&hdev->mgmt_pending_lock);
		return -ECANCELED;
	}

	memcpy(&cp, cmd->param, sizeof(cp));
	val = !!cp.val;

	mutex_unlock(&hdev->mgmt_pending_lock);

	if (!val) {
		/* Disabling LE: tear down all advertising first */
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2165
/* Completion callback for MGMT_OP_SET_MESH_RECEIVER.
 *
 * On error, reports the status to the originating socket and also fails
 * any other pending SET_MESH_RECEIVER commands with the same status.
 */
static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	u8 status = mgmt_status(err);
	struct sock *sk;

	/* Skip if cancelled or the command is no longer tracked */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	sk = cmd->sk;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				status);
		/* Fail all other pending SET_MESH_RECEIVER requests too */
		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, true,
				     cmd_status_rsp, &status);
		goto done;
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);

done:
	mgmt_pending_free(cmd);
}
2190
/* hci_cmd_sync work for MGMT_OP_SET_MESH_RECEIVER: apply the mesh
 * receiver configuration (enable flag, scan timing, AD-type filters)
 * and refresh passive scanning accordingly.
 */
static int set_mesh_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	DEFINE_FLEX(struct mgmt_cp_set_mesh, cp, ad_types, num_ad_types,
		    sizeof(hdev->mesh_ad_types));
	size_t len;

	/* Copy the parameters under mgmt_pending_lock; cmd may be freed
	 * once it is unlisted.
	 */
	mutex_lock(&hdev->mgmt_pending_lock);

	if (!__mgmt_pending_listed(hdev, cmd)) {
		mutex_unlock(&hdev->mgmt_pending_lock);
		return -ECANCELED;
	}

	len = cmd->param_len;
	memcpy(cp, cmd->param, min(__struct_size(cp), len));

	mutex_unlock(&hdev->mgmt_pending_lock);

	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));

	if (cp->enable)
		hci_dev_set_flag(hdev, HCI_MESH);
	else
		hci_dev_clear_flag(hdev, HCI_MESH);

	hdev->le_scan_interval = __le16_to_cpu(cp->period);
	hdev->le_scan_window = __le16_to_cpu(cp->window);

	/* NOTE(review): assumes param_len >= sizeof(struct mgmt_cp_set_mesh)
	 * (enforced by the mgmt command size check) so this cannot underflow
	 * — confirm against the handler table entry.
	 */
	len -= sizeof(struct mgmt_cp_set_mesh);

	/* If filters don't fit, forward all adv pkts */
	if (len <= sizeof(hdev->mesh_ad_types))
		memcpy(hdev->mesh_ad_types, cp->ad_types, len);

	hci_update_passive_scan_sync(hdev);
	return 0;
}
2229
/* MGMT_OP_SET_MESH_RECEIVER handler: validate the request and queue
 * set_mesh_sync on the cmd_sync work queue.  Requires LE support and
 * the mesh experimental flag; scan period/window must be within the
 * same ranges accepted by set_scan_params(), with window <= period.
 */
static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	__u16 period, window;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->enable != 0x00 && cp->enable != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Keep allowed ranges in sync with set_scan_params() */
	period = __le16_to_cpu(cp->period);

	if (period < 0x0004 || period > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	if (window > period)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2285
/* Completion callback for mesh_send_sync.
 *
 * On error, stops the sending state and completes the transmission with
 * failure.  On success, schedules the mesh_send_done work after the
 * expected on-air time (25 ms per advertisement event, times cnt); the
 * Mesh Packet Sent completion itself is reported from that work, not
 * here.
 */
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	unsigned long mesh_send_interval;
	u8 mgmt_err = mgmt_status(err);

	/* Report any errors here, but don't report completion */

	if (mgmt_err) {
		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
		/* Send Complete Error Code for handle */
		mesh_send_complete(hdev, mesh_tx, false);
		return;
	}

	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
			   mesh_send_interval);
}
2306
/* hci_cmd_sync work for MGMT_OP_MESH_SEND: transmit a mesh packet by
 * installing it as a temporary advertising instance and scheduling it.
 */
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	/* Mesh uses a dedicated instance number one past the controller's
	 * advertising sets so it cannot collide with regular instances.
	 */
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	/* All regular advertising slots are in use */
	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	timeout = 1000;
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other, or
		 * let it go naturally from queue if ADV is already happening
		 */
		instance = 0;
	}

	/* instance != 0 means we should kick off advertising of that
	 * instance now; 0 means leave the advertising queue as-is.
	 */
	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}
2360
send_count(struct mgmt_mesh_tx * mesh_tx,void * data)2361 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2362 {
2363 struct mgmt_rp_mesh_read_features *rp = data;
2364
2365 if (rp->used_handles >= rp->max_handles)
2366 return;
2367
2368 rp->handles[rp->used_handles++] = mesh_tx->handle;
2369 }
2370
/* MGMT_OP_MESH_READ_FEATURES handler: report the maximum number of
 * concurrent mesh transmissions and the handles currently in use by the
 * requesting socket.  max_handles stays 0 when LE is disabled.
 */
static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	memset(&rp, 0, sizeof(rp));
	rp.index = cpu_to_le16(hdev->id);
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	/* Trim the reply to the handles actually filled in: the fixed part
	 * plus used_handles entries instead of the full MESH_HANDLES_MAX
	 * array.
	 */
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}
2397
send_cancel(struct hci_dev * hdev,void * data)2398 static int send_cancel(struct hci_dev *hdev, void *data)
2399 {
2400 struct mgmt_pending_cmd *cmd = data;
2401 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2402 struct mgmt_mesh_tx *mesh_tx;
2403
2404 if (!cancel->handle) {
2405 do {
2406 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2407
2408 if (mesh_tx)
2409 mesh_send_complete(hdev, mesh_tx, false);
2410 } while (mesh_tx);
2411 } else {
2412 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2413
2414 if (mesh_tx && mesh_tx->sk == cmd->sk)
2415 mesh_send_complete(hdev, mesh_tx, false);
2416 }
2417
2418 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2419 0, NULL, 0);
2420 mgmt_pending_free(cmd);
2421
2422 return 0;
2423 }
2424
/* MGMT_OP_MESH_SEND_CANCEL handler: queue send_cancel on the cmd_sync
 * work queue.  Requires LE enabled and the mesh experimental flag.
 */
static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);
	/* mgmt_pending_new (not _add): send_cancel owns and frees the
	 * command itself, so it is not tracked on the pending list.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2458
/* MGMT_OP_MESH_SEND handler: queue a mesh packet for transmission.
 *
 * The payload (after the fixed header) must be 1..31 bytes of
 * advertising data.  The transmission is tracked via a per-socket
 * handle, which is returned in the command-complete; the actual send is
 * performed by mesh_send_sync unless a transmission is already active.
 */
static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct mgmt_cp_mesh_send *send = data;
	struct mgmt_rp_mesh_read_features rp;
	bool sending;
	int err = 0;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_NOT_SUPPORTED);
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    len <= MGMT_MESH_SEND_SIZE ||
	    len > (MGMT_MESH_SEND_SIZE + 31))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Reuse the read-features counting to check for a free handle */
	memset(&rp, 0, sizeof(rp));
	rp.max_handles = MESH_HANDLES_MAX;

	mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	if (rp.max_handles <= rp.used_handles) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_BUSY);
		goto done;
	}

	/* If a transmission is already running, just queue this one; the
	 * send-done work will pick it up later.
	 */
	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);

	if (!mesh_tx)
		err = -ENOMEM;
	else if (!sending)
		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
					 mesh_send_start_complete);

	if (err < 0) {
		bt_dev_err(hdev, "Send Mesh Failed %d", err);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_FAILED);

		/* NOTE(review): mesh_tx is only removed here when
		 * "sending" was set, i.e. when the queueing call was
		 * skipped — looks inverted at first glance; confirm the
		 * !sending failure path cleans up mesh_tx elsewhere.
		 */
		if (mesh_tx) {
			if (sending)
				mgmt_mesh_remove(mesh_tx);
		}
	} else {
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);

		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
				  &mesh_tx->handle, 1);
	}

done:
	hci_dev_unlock(hdev);
	return err;
}
2519
/* MGMT_OP_SET_LE handler: enable or disable Low Energy support.
 *
 * LE-only controllers (BR/EDR disabled) cannot switch LE off; when the
 * adapter is powered down or the requested state already matches the
 * host setting, only the flags are updated and no HCI traffic occurs.
 * Otherwise set_le_sync is queued on the cmd_sync work queue.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Powered off, or controller already in the requested state:
	 * only toggle the flags and emit New Settings if they changed.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Disabling LE implicitly disables advertising */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one LE/advertising state change may be in flight at once */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2608
/* hci_cmd_sync work for MGMT_OP_HCI_CMD_SYNC: issue the raw HCI command
 * supplied by userspace and return the resulting event payload in the
 * command-complete.  A zero timeout falls back to HCI_CMD_TIMEOUT.
 */
static int send_hci_cmd_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_hci_cmd_sync *cp = cmd->param;
	struct sk_buff *skb;

	skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cp->opcode),
				le16_to_cpu(cp->params_len), cp->params,
				cp->event, cp->timeout ?
				secs_to_jiffies(cp->timeout) :
				HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
				mgmt_status(PTR_ERR(skb)));
		goto done;
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC, 0,
			  skb->data, skb->len);

	kfree_skb(skb);

done:
	mgmt_pending_free(cmd);

	return 0;
}
2636
/* MGMT_OP_HCI_CMD_SYNC handler: validate that the total command length
 * matches the declared parameter length, then queue send_hci_cmd_sync.
 */
static int mgmt_hci_cmd_sync(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 len)
{
	struct mgmt_cp_hci_cmd_sync *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	if (len != (offsetof(struct mgmt_cp_hci_cmd_sync, params) +
		    le16_to_cpu(cp->params_len)))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);
	/* mgmt_pending_new (not _add): send_hci_cmd_sync frees the
	 * command itself, so it is not tracked on the pending list.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_HCI_CMD_SYNC, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_hci_cmd_sync, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2667
2668 /* This is a helper function to test for pending mgmt commands that can
2669 * cause CoD or EIR HCI commands. We can only allow one such pending
2670 * mgmt command at a time since otherwise we cannot easily track what
2671 * the current values are, will be, and based on that calculate if a new
2672 * HCI command needs to be sent and if yes with what value.
2673 */
pending_eir_or_class(struct hci_dev * hdev)2674 static bool pending_eir_or_class(struct hci_dev *hdev)
2675 {
2676 struct mgmt_pending_cmd *cmd;
2677
2678 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2679 switch (cmd->opcode) {
2680 case MGMT_OP_ADD_UUID:
2681 case MGMT_OP_REMOVE_UUID:
2682 case MGMT_OP_SET_DEV_CLASS:
2683 case MGMT_OP_SET_POWERED:
2684 return true;
2685 }
2686 }
2687
2688 return false;
2689 }
2690
/* Bluetooth Base UUID (xxxxxxxx-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16- and 32-bit UUIDs alias the first four
 * bytes of this value.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2695
get_uuid_size(const u8 * uuid)2696 static u8 get_uuid_size(const u8 *uuid)
2697 {
2698 u32 val;
2699
2700 if (memcmp(uuid, bluetooth_base_uuid, 12))
2701 return 128;
2702
2703 val = get_unaligned_le32(&uuid[12]);
2704 if (val > 0xffff)
2705 return 32;
2706
2707 return 16;
2708 }
2709
/* Shared completion callback for the class/EIR updating commands
 * (Add/Remove UUID, Set Device Class): reply with the translated status
 * and the current 3-byte Class of Device, then free the pending command.
 */
static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
			  mgmt_status(err), hdev->dev_class, 3);

	mgmt_pending_free(cmd);
}
2721
/* cmd_sync work for MGMT_OP_ADD_UUID: refresh the Class of Device and,
 * if that succeeded, the EIR data.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_update_class_sync(hdev);
	if (!err)
		err = hci_update_eir_sync(hdev);

	return err;
}
2732
/* MGMT_OP_ADD_UUID handler: record the UUID in hdev->uuids and queue a
 * Class of Device / EIR refresh.  Rejected with BUSY while another
 * CoD/EIR-affecting command is pending.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* MGMT_OP_ADD_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2782
enable_service_cache(struct hci_dev * hdev)2783 static bool enable_service_cache(struct hci_dev *hdev)
2784 {
2785 if (!hdev_is_powered(hdev))
2786 return false;
2787
2788 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2789 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2790 CACHE_TIMEOUT);
2791 return true;
2792 }
2793
2794 return false;
2795 }
2796
/* cmd_sync work for MGMT_OP_REMOVE_UUID: refresh the Class of Device
 * and, if that succeeded, the EIR data.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_update_class_sync(hdev);
	if (!err)
		err = hci_update_eir_sync(hdev);

	return err;
}
2807
/* MGMT_OP_REMOVE_UUID handler: delete a single UUID (or all of them
 * when the all-zero UUID is given) and queue a CoD/EIR refresh.  When
 * clearing all UUIDs on a powered adapter, the update is deferred via
 * the service cache instead.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID is the wildcard meaning "remove everything" */
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache took over, reply immediately; the
		 * actual CoD/EIR update happens on the delayed flush.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_REMOVE_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2878
set_class_sync(struct hci_dev * hdev,void * data)2879 static int set_class_sync(struct hci_dev *hdev, void *data)
2880 {
2881 int err = 0;
2882
2883 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2884 cancel_delayed_work_sync(&hdev->service_cache);
2885 err = hci_update_eir_sync(hdev);
2886 }
2887
2888 if (err)
2889 return err;
2890
2891 return hci_update_class_sync(hdev);
2892 }
2893
/* MGMT_OP_SET_DEV_CLASS handler: store the major/minor device class and
 * queue a controller update when powered.  The low 2 bits of minor and
 * high 3 bits of major are reserved and must be zero.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: reply with the stored class; the controller gets
	 * updated during power on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_SET_DEV_CLASS don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2948
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the stored BR/EDR link keys
 * with the supplied list.  Invalid entries (blocked keys, non-BR/EDR
 * address types, unknown key types, debug keys) are skipped with a
 * warning rather than failing the whole command.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Cap on key_count so struct_size() below cannot exceed U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		if (key->addr.type != BDADDR_BREDR) {
			bt_dev_warn(hdev,
				    "Invalid link address type %u for %pMR",
				    key->addr.type, &key->addr.bdaddr);
			continue;
		}

		/* 0x08 is the highest defined link key type */
		if (key->type > 0x08) {
			bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
				    key->type, &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
3041
/* Emit a Device Unpaired event for the given address to all mgmt
 * sockets except skip_sk.
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	ev.addr.type = addr_type;
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
3053
/* Completion callback for MGMT_OP_UNPAIR_DEVICE: on success broadcast
 * the Device Unpaired event (skipping the requester, who gets the
 * command reply via cmd_complete), then finish and free the command.
 */
static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	if (!err)
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, err);
	mgmt_pending_free(cmd);
}
3065
unpair_device_sync(struct hci_dev * hdev,void * data)3066 static int unpair_device_sync(struct hci_dev *hdev, void *data)
3067 {
3068 struct mgmt_pending_cmd *cmd = data;
3069 struct mgmt_cp_unpair_device *cp = cmd->param;
3070 struct hci_conn *conn;
3071
3072 if (cp->addr.type == BDADDR_BREDR)
3073 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3074 &cp->addr.bdaddr);
3075 else
3076 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3077 le_addr_type(cp->addr.type));
3078
3079 if (!conn)
3080 return 0;
3081
3082 /* Disregard any possible error since the likes of hci_abort_conn_sync
3083 * will clean up the connection no matter the error.
3084 */
3085 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3086
3087 return 0;
3088 }
3089
/* MGMT_OP_UNPAIR_DEVICE handler: remove the pairing data (link key for
 * BR/EDR; SMP LTK/IRK for LE) and, when cp->disconnect is set and a
 * connection exists, queue its termination.  The Device Unpaired event
 * is sent immediately when no link needs tearing down, otherwise from
 * unpair_device_complete.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3218
disconnect_complete(struct hci_dev * hdev,void * data,int err)3219 static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
3220 {
3221 struct mgmt_pending_cmd *cmd = data;
3222
3223 cmd->cmd_complete(cmd, mgmt_status(err));
3224 mgmt_pending_free(cmd);
3225 }
3226
disconnect_sync(struct hci_dev * hdev,void * data)3227 static int disconnect_sync(struct hci_dev *hdev, void *data)
3228 {
3229 struct mgmt_pending_cmd *cmd = data;
3230 struct mgmt_cp_disconnect *cp = cmd->param;
3231 struct hci_conn *conn;
3232
3233 if (cp->addr.type == BDADDR_BREDR)
3234 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3235 &cp->addr.bdaddr);
3236 else
3237 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3238 le_addr_type(cp->addr.type));
3239
3240 if (!conn)
3241 return -ENOTCONN;
3242
3243 /* Disregard any possible error since the likes of hci_abort_conn_sync
3244 * will clean up the connection no matter the error.
3245 */
3246 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3247
3248 return 0;
3249 }
3250
/* MGMT_OP_DISCONNECT handler: validate the address type, make sure the
 * adapter is up, then queue the actual link termination on the command
 * sync machinery so it runs outside the mgmt call context.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct mgmt_rp_disconnect rp;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto done;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
				 disconnect_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3296
/* Translate an HCI link type plus LE address type into the corresponding
 * mgmt BDADDR_* address type used in responses and events.
 */
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	bool is_le = link_type == LE_LINK || link_type == CIS_LINK ||
		     link_type == BIS_LINK || link_type == PA_LINK;

	if (!is_le)
		return BDADDR_BREDR; /* Fallback to BR/EDR type */

	if (addr_type == ADDR_LE_DEV_PUBLIC)
		return BDADDR_LE_PUBLIC;

	/* Fallback to LE Random address type */
	return BDADDR_LE_RANDOM;
}
3318
/* MGMT_OP_GET_CONNECTIONS handler: report the addresses of all
 * mgmt-visible connections.  SCO/eSCO links are filtered out of the
 * reply since they always shadow an existing ACL connection.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	u16 count = 0;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: upper bound on the number of reported connections */
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			count++;
	}

	rp = kmalloc(struct_size(rp, addr, count), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in addresses, skipping SCO/eSCO links */
	count = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		bacpy(&rp->addr[count].bdaddr, &c->dst);
		rp->addr[count].type = link_to_bdaddr(c->type, c->dst_type);
		count++;
	}

	rp->conn_count = cpu_to_le16(count);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, count));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3372
send_pin_code_neg_reply(struct sock * sk,struct hci_dev * hdev,struct mgmt_cp_pin_code_neg_reply * cp)3373 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3374 struct mgmt_cp_pin_code_neg_reply *cp)
3375 {
3376 struct mgmt_pending_cmd *cmd;
3377 int err;
3378
3379 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3380 sizeof(*cp));
3381 if (!cmd)
3382 return -ENOMEM;
3383
3384 cmd->cmd_complete = addr_cmd_complete;
3385
3386 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3387 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3388 if (err < 0)
3389 mgmt_pending_remove(cmd);
3390
3391 return err;
3392 }
3393
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to
 * the controller for an ongoing BR/EDR legacy pairing.
 *
 * Returns 0 or a negative errno; the mgmt-level result is delivered to
 * @sk via a command status/complete event.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* A PIN code reply only makes sense for an existing ACL link */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN.  If it is shorter,
	 * reject the pairing at the HCI level (negative reply) and tell
	 * user space the parameters were invalid.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		/* Only report INVALID_PARAMS if the neg reply itself did
		 * not already fail with an errno.
		 */
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Build the HCI PIN Code Reply from the mgmt request */
	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3455
/* MGMT_OP_SET_IO_CAPABILITY handler: store the IO capability used for
 * subsequent SSP/SMP pairing attempts.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Reject values beyond the highest capability defined by SMP */
	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);
	hdev->io_capability = cp->io_capability;
	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
3478
find_pairing(struct hci_conn * conn)3479 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3480 {
3481 struct hci_dev *hdev = conn->hdev;
3482 struct mgmt_pending_cmd *cmd;
3483
3484 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3485 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3486 continue;
3487
3488 if (cmd->user_data != conn)
3489 continue;
3490
3491 return cmd;
3492 }
3493
3494 return NULL;
3495 }
3496
/* Complete a pending MGMT_OP_PAIR_DEVICE command.
 *
 * Reports the result to the originating socket, detaches the pairing
 * callbacks from the connection and releases both references taken when
 * the command was set up: the usage reference (hci_conn_drop) and the
 * object reference (hci_conn_put).
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}
3525
/* Called by the SMP layer when pairing for @conn finishes; completes any
 * pending MGMT_OP_PAIR_DEVICE command with success or failure.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, complete ? MGMT_STATUS_SUCCESS :
					  MGMT_STATUS_FAILED);
	mgmt_pending_remove(cmd);
}
3537
/* BR/EDR connection callback: any connect/security/disconnect event
 * finishes the pending pairing with the translated HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	BT_DBG("status %u", status);

	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
3553
/* LE connection callback: only errors complete the pending pairing here;
 * success is reported separately via mgmt_smp_complete() once SMP is done.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
3572
/* MGMT_OP_PAIR_DEVICE handler: initiate pairing with a remote device.
 *
 * Creates (or reuses) a connection to the peer, wires the pairing
 * completion callbacks into it and tracks the request as a pending mgmt
 * command.  The command's user_data holds an hci_conn reference
 * (hci_conn_get) which pairing_complete() releases together with the
 * usage reference taken by the connect helpers.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE,
				       HCI_ACL_CONN_TIMEOUT);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the peripheral preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			err = -EIO;
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Map the connect errno onto a mgmt status code */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* A set connect_cfm_cb means another pairing is already using
	 * this connection; drop the extra usage reference and report busy.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = hci_conn_get(conn);

	/* If the link is already up and secure enough, complete at once */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3708
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an in-progress pairing.
 *
 * Completes the pending PAIR_DEVICE command with CANCELLED, removes any
 * keys created so far and, if the connection only exists because of the
 * pairing, terminates the link as well.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The cancel request must target the same peer as the pairing */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3765
/* Common handler for all user pairing responses (PIN, user confirm,
 * passkey, and their negative variants).
 *
 * @mgmt_op: the mgmt opcode being answered, used in the reply.
 * @hci_op:  the HCI command to send for BR/EDR pairing.
 * @passkey: only used when @hci_op is HCI_OP_USER_PASSKEY_REPLY.
 *
 * LE responses are routed through SMP and completed immediately; BR/EDR
 * responses are forwarded to the controller and completed when the HCI
 * command does.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go to SMP and complete synchronously */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3836
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: reject a PIN code request. */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3848
/* MGMT_OP_USER_CONFIRM_REPLY handler: accept a numeric comparison. */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* This command has a fixed size; reject anything else */
	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3864
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: reject a numeric comparison. */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3876
/* MGMT_OP_USER_PASSKEY_REPLY handler: supply the passkey entered by the
 * user for passkey-entry pairing.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3888
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: reject a passkey request. */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3900
/* If the current advertising instance carries any of @flags, expire it
 * and reschedule the next instance so stale data is not kept on air.
 */
static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance);

	/* Nothing to do when there is no current instance or it does not
	 * use any of the affected flags.
	 */
	if (!adv || !(adv->flags & flags))
		return 0;

	cancel_adv_timeout(hdev);

	adv = hci_get_next_instance(hdev, adv->instance);
	if (adv)
		hci_schedule_adv_instance_sync(hdev, adv->instance, true);

	return 0;
}
3923
name_changed_sync(struct hci_dev * hdev,void * data)3924 static int name_changed_sync(struct hci_dev *hdev, void *data)
3925 {
3926 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3927 }
3928
/* Completion callback for the queued MGMT_OP_SET_LOCAL_NAME work.
 *
 * Bails out if the request was cancelled or the pending command is no
 * longer listed (mgmt_pending_valid), then reports the result and, on
 * success while advertising, refreshes the advertised name.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Someone else already completed/cancelled this command */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		/* The new name may need to be pushed into the adv data */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_free(cmd);
}
3955
/* hci_cmd_sync work for MGMT_OP_SET_LOCAL_NAME: write the new name to
 * the controller (BR/EDR name + EIR, and LE scan response when
 * advertising).
 *
 * cmd->param is copied out under mgmt_pending_lock so the HCI calls run
 * without holding the lock; -ECANCELED is returned if the command was
 * removed in the meantime.
 */
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name cp;

	mutex_lock(&hdev->mgmt_pending_lock);

	/* Command may have been cancelled while this work was queued */
	if (!__mgmt_pending_listed(hdev, cmd)) {
		mutex_unlock(&hdev->mgmt_pending_lock);
		return -ECANCELED;
	}

	memcpy(&cp, cmd->param, sizeof(cp));

	mutex_unlock(&hdev->mgmt_pending_lock);

	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev, cp.name);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
3985
/* MGMT_OP_SET_LOCAL_NAME handler: set the device name and short name.
 *
 * Three completion paths: (1) nothing changed — immediate success;
 * (2) adapter powered off — update the cache, reply and emit the
 * name-changed event; (3) adapter powered — queue the controller update
 * and reply from set_name_complete().
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name is never written to the controller, so it can be
	 * updated unconditionally right away.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	/* Cache the name now; the controller update runs asynchronously */
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
4048
appearance_changed_sync(struct hci_dev * hdev,void * data)4049 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
4050 {
4051 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
4052 }
4053
/* MGMT_OP_SET_APPEARANCE handler: update the GAP appearance value used
 * in LE advertising.  LE-only; BR/EDR-only adapters reject the command.
 */
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 new_appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	new_appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != new_appearance) {
		hdev->appearance = new_appearance;

		/* Refresh advertising instances that embed the appearance */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}
4088
/* MGMT_OP_GET_PHY_CONFIGURATION handler: report the supported, selected
 * and configurable PHYs of the adapter.
 */
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_configuration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));

	hci_dev_lock(hdev);
	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}
4109
mgmt_phy_configuration_changed(struct hci_dev * hdev,struct sock * skip)4110 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
4111 {
4112 struct mgmt_ev_phy_configuration_changed ev;
4113
4114 memset(&ev, 0, sizeof(ev));
4115
4116 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
4117
4118 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
4119 sizeof(ev), skip);
4120 }
4121
set_default_phy_complete(struct hci_dev * hdev,void * data,int err)4122 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
4123 {
4124 struct mgmt_pending_cmd *cmd = data;
4125 struct sk_buff *skb;
4126 u8 status = mgmt_status(err);
4127
4128 skb = cmd->skb;
4129
4130 if (!status) {
4131 if (!skb)
4132 status = MGMT_STATUS_FAILED;
4133 else if (IS_ERR(skb))
4134 status = mgmt_status(PTR_ERR(skb));
4135 else
4136 status = mgmt_status(skb->data[0]);
4137 }
4138
4139 bt_dev_dbg(hdev, "status %d", status);
4140
4141 if (status) {
4142 mgmt_cmd_status(cmd->sk, hdev->id,
4143 MGMT_OP_SET_PHY_CONFIGURATION, status);
4144 } else {
4145 mgmt_cmd_complete(cmd->sk, hdev->id,
4146 MGMT_OP_SET_PHY_CONFIGURATION, 0,
4147 NULL, 0);
4148
4149 mgmt_phy_configuration_changed(hdev, cmd->sk);
4150 }
4151
4152 if (skb && !IS_ERR(skb))
4153 kfree_skb(skb);
4154
4155 mgmt_pending_free(cmd);
4156 }
4157
set_default_phy_sync(struct hci_dev * hdev,void * data)4158 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4159 {
4160 struct mgmt_pending_cmd *cmd = data;
4161 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4162 struct hci_cp_le_set_default_phy cp_phy;
4163 u32 selected_phys;
4164
4165 selected_phys = __le32_to_cpu(cp->selected_phys);
4166
4167 memset(&cp_phy, 0, sizeof(cp_phy));
4168
4169 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4170 cp_phy.all_phys |= 0x01;
4171
4172 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4173 cp_phy.all_phys |= 0x02;
4174
4175 if (selected_phys & MGMT_PHY_LE_1M_TX)
4176 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4177
4178 if (selected_phys & MGMT_PHY_LE_2M_TX)
4179 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4180
4181 if (selected_phys & MGMT_PHY_LE_CODED_TX)
4182 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4183
4184 if (selected_phys & MGMT_PHY_LE_1M_RX)
4185 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4186
4187 if (selected_phys & MGMT_PHY_LE_2M_RX)
4188 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4189
4190 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4191 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4192
4193 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4194 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4195
4196 return 0;
4197 }
4198
/* MGMT_OP_SET_PHY_CONFIGURATION handler.
 *
 * BR/EDR PHY selections are applied immediately by adjusting the ACL
 * packet type mask (note the inverted EDR bits: HCI_2DHx/HCI_3DHx set
 * means "packet type NOT allowed").  LE PHY selections require an HCI
 * command and are queued on the command sync machinery.
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Reject selections containing unsupported PHYs */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* Mandatory (non-configurable) PHYs must always stay selected */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* No change requested — complete right away */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* BR 1M slots: bits set mean the packet types are allowed */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	/* EDR packet types use inverted logic: a set HCI_2DHx/HCI_3DHx
	 * bit means "shall NOT be used".
	 */
	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If the LE part of the selection is unchanged, no HCI command
	 * is needed: complete now and, if BR/EDR changed, notify.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4327
/* MGMT_OP_SET_BLOCKED_KEYS: replace the controller's blocked-key list with
 * the keys supplied by userspace.  The reply status is a MGMT status code;
 * keys added before an allocation failure are kept.
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_blocked_keys *keys = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int status = MGMT_STATUS_SUCCESS;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Bound the count before using it to compute the expected length. */
	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* Start from an empty list, then add each provided key. */
	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct blocked_key *key = kzalloc(sizeof(*key), GFP_KERNEL);

		if (!key) {
			status = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		key->type = keys->keys[i].type;
		memcpy(key->val, keys->keys[i].val, sizeof(key->val));
		list_add_rcu(&key->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				 status, NULL, 0);
}
4376
/* MGMT_OP_SET_WIDEBAND_SPEECH: toggle the wideband-speech setting.
 *
 * Only controllers with HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED can be
 * configured, and the value cannot be changed while powered on.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_test_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Only boolean on/off is accepted. */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Flipping the setting while the controller is powered is rejected. */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* test_and_{set,clear} report whether the flag actually changed. */
	changed = cp->val ?
		!hci_dev_test_and_set_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED) :
		hci_dev_test_and_clear_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4425
/* MGMT_OP_READ_CONTROLLER_CAP: report security capability TLVs.
 *
 * buf is sized so the response header plus every TLV appended below
 * (security flags, optional BR/EDR max key size, SMP max key size,
 * optional LE TX power range) fits in 20 bytes — keep it in sync when
 * adding capabilities.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	/* LE public key validation is always available (done by SMP). */
	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		/* Copy the s8 values byte-wise into the two-byte TLV payload. */
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
4492
/* Experimental feature UUIDs.
 *
 * Each table stores the UUID quoted in the comment above it in reversed
 * (little-endian) byte order, matching the raw byte layout that
 * set_exp_feature() memcmp()s against cp->uuid.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};
4530
/* MGMT_OP_READ_EXP_FEATURES_INFO: list the experimental features that apply
 * to @hdev (or the global ones when hdev is NULL) along with their current
 * enabled state in flags bit 0.  Also opts the requesting socket in to
 * future MGMT_EV_EXP_FEATURE_CHANGED events.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_exp_features_info *rp;
	size_t len;
	u16 count = 0;
	u32 flags;
	int status;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enough space for 7 features */
	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
	rp = kzalloc(len, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

#ifdef CONFIG_BT_FEATURE_DEBUG
	flags = bt_dbg_get() ? BIT(0) : 0;

	memcpy(rp->features[count].uuid, debug_uuid, 16);
	rp->features[count].flags = cpu_to_le32(flags);
	count++;
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		flags = hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES) ?
			BIT(0) : 0;

		memcpy(rp->features[count].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[count].flags = cpu_to_le32(flags);
		count++;
	}

	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		flags = hci_dev_test_flag(hdev, HCI_QUALITY_REPORT) ?
			BIT(0) : 0;

		memcpy(rp->features[count].uuid, quality_report_uuid, 16);
		rp->features[count].flags = cpu_to_le32(flags);
		count++;
	}

	if (hdev && hdev->get_data_path_id) {
		flags = hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED) ?
			BIT(0) : 0;

		memcpy(rp->features[count].uuid, offload_codecs_uuid, 16);
		rp->features[count].flags = cpu_to_le32(flags);
		count++;
	}

	if (IS_ENABLED(CONFIG_BT_LE)) {
		flags = iso_inited() ? BIT(0) : 0;
		memcpy(rp->features[count].uuid, iso_socket_uuid, 16);
		rp->features[count].flags = cpu_to_le32(flags);
		count++;
	}

	if (hdev && lmp_le_capable(hdev)) {
		flags = hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL) ?
			BIT(0) : 0;

		memcpy(rp->features[count].uuid, mgmt_mesh_uuid, 16);
		rp->features[count].flags = cpu_to_le32(flags);
		count++;
	}

	rp->feature_count = cpu_to_le16(count);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	/* Each feature entry occupies 20 bytes (16-byte UUID + le32 flags). */
	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				   MGMT_OP_READ_EXP_FEATURES_INFO,
				   0, rp, sizeof(*rp) + (20 * count));

	kfree(rp);
	return status;
}
4622
/* Broadcast MGMT_EV_EXP_FEATURE_CHANGED for @uuid to every mgmt socket
 * that opted in via HCI_MGMT_EXP_FEATURE_EVENTS, except @skip.
 */
static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
			       bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	/* Zero the whole event first so no stale stack bytes go out. */
	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, uuid, 16);
	/* Bit 0 of flags mirrors the enabled state. */
	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
4636
/* Build one entry of the exp_features[] table: pairs an experimental
 * feature UUID with the handler that sets it.
 */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4642
4643 /* The zero key uuid is special. Multiple exp features are set through it. */
set_zero_key_func(struct sock * sk,struct hci_dev * hdev,struct mgmt_cp_set_exp_feature * cp,u16 data_len)4644 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4645 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4646 {
4647 struct mgmt_rp_set_exp_feature rp;
4648
4649 memset(rp.uuid, 0, 16);
4650 rp.flags = cpu_to_le32(0);
4651
4652 #ifdef CONFIG_BT_FEATURE_DEBUG
4653 if (!hdev) {
4654 bool changed = bt_dbg_get();
4655
4656 bt_dbg_set(false);
4657
4658 if (changed)
4659 exp_feature_changed(NULL, ZERO_KEY, false, sk);
4660 }
4661 #endif
4662
4663 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4664
4665 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4666 MGMT_OP_SET_EXP_FEATURE, 0,
4667 &rp, sizeof(rp));
4668 }
4669
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Toggle the BT debug experimental feature (global, non-controller). */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool enable, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	enable = !!cp->param[0];
	/* Note whether the state actually flips before applying it. */
	changed = (bt_dbg_get() != enable);
	bt_dbg_set(enable);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(enable ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, debug_uuid, enable, sk);

	return err;
}
#endif
4716
/* Toggle the experimental mesh feature on a controller. */
static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool enable, changed;
	int err;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	enable = !!cp->param[0];

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_MESH_EXPERIMENTAL);
	} else {
		/* Dropping the experimental feature also clears HCI_MESH. */
		hci_dev_clear_flag(hdev, HCI_MESH);
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_MESH_EXPERIMENTAL);
	}

	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
	rp.flags = cpu_to_le32(enable ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, mgmt_mesh_uuid, enable, sk);

	return err;
}
4767
/* Toggle the quality-report experimental feature on a controller.
 *
 * Runs under hci_req_sync_lock() because toggling goes through either the
 * driver's set_quality_report() hook or the AOSP vendor command; the
 * HCI_QUALITY_REPORT flag is only mirrored after the toggle succeeded.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	/* Feature needs either a driver hook or AOSP vendor support. */
	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		/* The driver hook takes precedence over the AOSP command. */
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		/* Mirror the flag only once the toggle succeeded. */
		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4841
/* Toggle the codec-offload experimental feature on a controller. */
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool enable, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Offload requires the driver to expose a data path. */
	if (!hdev->get_data_path_id)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);

	enable = !!cp->param[0];
	changed = (enable != hci_dev_test_flag(hdev,
					       HCI_OFFLOAD_CODECS_ENABLED));

	if (changed) {
		if (enable)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    enable, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(enable ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, enable, sk);

	return err;
}
4899
/* Toggle the LE simultaneous central/peripheral experimental feature. */
static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool enable, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Requires controller support for simultaneous LE states. */
	if (!hci_dev_le_state_simultaneous(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);

	enable = !!cp->param[0];
	changed = (enable != hci_dev_test_flag(hdev,
					       HCI_LE_SIMULTANEOUS_ROLES));

	if (changed) {
		if (enable)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    enable, changed);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(enable ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, enable,
				    sk);

	return err;
}
4957
#ifdef CONFIG_BT_LE
/* Toggle the ISO socket experimental feature (global, non-controller). */
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool enable, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	enable = !!cp->param[0];
	err = enable ? iso_init() : iso_exit();

	/* Notify listeners only when init/exit actually succeeded. */
	changed = !err;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(enable ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, enable, sk);

	return err;
}
#endif
5008
/* Dispatch table mapping experimental-feature UUIDs to their handlers;
 * terminated by a NULL entry scanned for in set_exp_feature().
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
5029
/* MGMT_OP_SET_EXP_FEATURE: dispatch to the handler matching cp->uuid. */
static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	const struct mgmt_exp_feature *feature;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Walk the table up to its NULL sentinel entry. */
	for (feature = exp_features; feature->uuid; feature++) {
		if (!memcmp(cp->uuid, feature->uuid, 16))
			return feature->set_func(sk, hdev, cp, data_len);
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}
5047
/* MGMT_OP_GET_DEVICE_FLAGS: report the supported and current connection
 * flags for one device, looked up on the BR/EDR accept list or in the LE
 * connection parameters.  Unknown devices yield MGMT_STATUS_INVALID_PARAMS
 * with a zeroed reply.
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	/* No trailing newline: the bt_dev_dbg() machinery terminates the
	 * line itself, as every other debug call in this file relies on.
	 */
	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	supported_flags = hdev->conn_flags;

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (!params)
			goto done;

		current_flags = params->flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				 &rp, sizeof(rp));
}
5098
/* Emit MGMT_EV_DEVICE_FLAGS_CHANGED carrying the new flag state.  The
 * trailing @sk argument follows the mgmt_event() convention used by the
 * other callers in this file (socket to be skipped).
 */
static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = bdaddr_type;
	ev.supported_flags = cpu_to_le32(supported_flags);
	ev.current_flags = cpu_to_le32(current_flags);

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}
5112
/* MGMT_OP_SET_DEVICE_FLAGS: replace the connection flags of a known device
 * (BR/EDR accept list entry or LE connection parameters).  On success a
 * DEVICE_FLAGS_CHANGED event is sent to the other mgmt sockets.
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* NOTE(review): conn_flags is sampled here without hci_dev_lock();
	 * the LE path re-validates under the lock below, but the BR/EDR
	 * path only checks against this unlocked snapshot — confirm whether
	 * conn_flags can change concurrently and whether the lock should be
	 * taken earlier.
	 */
	supported_flags = hdev->conn_flags;

	/* Reject any flag bit the controller does not support. */
	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
		goto unlock;
	}

	/* Re-read and re-validate under the lock for the LE path. */
	supported_flags = hdev->conn_flags;

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	WRITE_ONCE(params->flags, current_flags);
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}
5189
/* Emit MGMT_EV_ADV_MONITOR_ADDED for @handle, skipping @sk. */
static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
				   u16 handle)
{
	struct mgmt_ev_adv_monitor_added ev = {
		.monitor_handle = cpu_to_le16(handle),
	};

	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
}
5199
/* Emit MGMT_EV_ADV_MONITOR_REMOVED; @handle is already little-endian. */
static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
				     __le16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev = {
		.monitor_handle = handle,
	};

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
}
5209
/* MGMT_OP_READ_ADV_MONITOR_FEATURES: report supported/enabled monitor
 * features plus the handles of all currently registered monitors.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	/* Snapshot registered handles under the lock.  Assumes the
	 * registration path never admits more than
	 * HCI_MAX_ADV_MONITOR_NUM_HANDLES monitors — TODO confirm.
	 */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
5258
/* Completion callback for mgmt_add_adv_patterns_monitor_sync().
 *
 * On success the monitor is announced, counted, marked REGISTERED and
 * passive scanning re-evaluated; in every (non-cancelled) case the pending
 * command is answered with the monitor handle and removed.
 */
static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor;

	/* This is likely the result of hdev being closed and mgmt_index_removed
	 * is attempting to clean up any pending command so
	 * hci_adv_monitors_clear is about to be called which will take care of
	 * freeing the adv_monitor instances.
	 */
	if (status == -ECANCELED && !mgmt_pending_valid(hdev, cmd))
		return;

	monitor = cmd->user_data;

	hci_dev_lock(hdev);

	/* Reply with the handle even on failure so userspace can correlate. */
	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5296
/* hci_sync work for Add Adv Patterns Monitor: re-checks under
 * mgmt_pending_lock that the pending command was not cancelled in the
 * meantime, then registers the monitor with the controller.
 */
static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *mon;

	mutex_lock(&hdev->mgmt_pending_lock);

	if (!__mgmt_pending_listed(hdev, cmd)) {
		mutex_unlock(&hdev->mgmt_pending_lock);
		return -ECANCELED;
	}

	mon = cmd->user_data;

	/* NOTE(review): mon is fetched under the lock but used by
	 * hci_add_adv_monitor() after it is dropped; presumably safe because
	 * the completion path re-validates cmd - confirm against upstream.
	 */
	mutex_unlock(&hdev->mgmt_pending_lock);

	return hci_add_adv_monitor(hdev, mon);
}
5315
/* Common tail for MGMT_OP_ADD_ADV_PATTERNS_MONITOR[_RSSI]: takes ownership
 * of @m and either queues the registration work, or frees the monitor and
 * replies with @status (or a locally computed error).
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	/* Caller's parsing already failed; just report its status. */
	if (status)
		goto unlock;

	/* Only one monitor (or SET_LE) operation may be in flight at once. */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		/* NOTE(review): on this path cmd stays on the pending list
		 * while m is freed below (cmd->user_data then dangles) - it
		 * looks like a mgmt_pending_remove(cmd) may be missing here;
		 * confirm against upstream before changing.
		 */
		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	/* Error path: the monitor never reached the sync machinery. */
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
5362
parse_adv_monitor_rssi(struct adv_monitor * m,struct mgmt_adv_rssi_thresholds * rssi)5363 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5364 struct mgmt_adv_rssi_thresholds *rssi)
5365 {
5366 if (rssi) {
5367 m->rssi.low_threshold = rssi->low_threshold;
5368 m->rssi.low_threshold_timeout =
5369 __le16_to_cpu(rssi->low_threshold_timeout);
5370 m->rssi.high_threshold = rssi->high_threshold;
5371 m->rssi.high_threshold_timeout =
5372 __le16_to_cpu(rssi->high_threshold_timeout);
5373 m->rssi.sampling_period = rssi->sampling_period;
5374 } else {
5375 /* Default values. These numbers are the least constricting
5376 * parameters for MSFT API to work, so it behaves as if there
5377 * are no rssi parameter to consider. May need to be changed
5378 * if other API are to be supported.
5379 */
5380 m->rssi.low_threshold = -127;
5381 m->rssi.low_threshold_timeout = 60;
5382 m->rssi.high_threshold = -127;
5383 m->rssi.high_threshold_timeout = 0;
5384 m->rssi.sampling_period = 0;
5385 }
5386 }
5387
/* Validate and copy @pattern_count patterns into the monitor's pattern
 * list. On failure the entries added so far stay on m->patterns; the
 * caller releases them via hci_free_adv_monitor().
 */
static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
				    struct mgmt_adv_pattern *patterns)
{
	struct adv_pattern *entry;
	int idx;

	for (idx = 0; idx < pattern_count; idx++) {
		u8 off = patterns[idx].offset;
		u8 plen = patterns[idx].length;

		/* The matched bytes must lie entirely within the AD data. */
		if (off >= HCI_MAX_AD_LENGTH || plen > HCI_MAX_AD_LENGTH ||
		    off + plen > HCI_MAX_AD_LENGTH)
			return MGMT_STATUS_INVALID_PARAMS;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			return MGMT_STATUS_NO_RESOURCES;

		entry->ad_type = patterns[idx].ad_type;
		entry->offset = off;
		entry->length = plen;
		memcpy(entry->value, patterns[idx].value, plen);

		INIT_LIST_HEAD(&entry->list);
		list_add(&entry->list, &m->patterns);
	}

	return MGMT_STATUS_SUCCESS;
}
5418
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR handler: validates the variable-length
 * pattern payload, builds an adv_monitor with default RSSI thresholds and
 * hands everything (including ownership of @m, possibly still NULL) to
 * __add_adv_patterns_monitor().
 */
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* At least one pattern must follow the fixed-size header. */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* Total length must match pattern_count exactly. */
	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	/* NULL selects the default (least constricting) RSSI parameters. */
	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}
5455
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI handler: same as
 * add_adv_patterns_monitor() but takes RSSI thresholds from the request
 * instead of using the defaults.
 */
static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* At least one pattern must follow the fixed-size header. */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* Total length must match pattern_count exactly. */
	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	/* Thresholds come from the caller-supplied RSSI block here. */
	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}
5492
/* Completion callback for Remove Adv Monitor: echoes the requested handle
 * back to the issuing socket and, on success, notifies listeners and
 * refreshes passive scanning.
 */
static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp;

	/* Cancelled (e.g. hdev going away): the cleanup path owns cmd. */
	if (status == -ECANCELED)
		return;

	hci_dev_lock(hdev);

	cp = cmd->param;

	/* Already little-endian in the request; passed through as-is. */
	rp.monitor_handle = cp->monitor_handle;

	if (!status) {
		mgmt_adv_monitor_removed(cmd->sk, hdev, cp->monitor_handle);
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5522
mgmt_remove_adv_monitor_sync(struct hci_dev * hdev,void * data)5523 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5524 {
5525 struct mgmt_pending_cmd *cmd = data;
5526 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5527 u16 handle = __le16_to_cpu(cp->monitor_handle);
5528
5529 if (!handle)
5530 return hci_remove_all_adv_monitor(hdev);
5531
5532 return hci_remove_single_adv_monitor(hdev, handle);
5533 }
5534
/* MGMT_OP_REMOVE_ADV_MONITOR handler: rejects the request while a
 * conflicting monitor/LE operation is pending, otherwise queues the
 * removal work for the hci_sync machinery.
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err, status;

	hci_dev_lock(hdev);

	/* Adding and removing monitors must not run concurrently. */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	/* mgmt_pending_new: not added to the pending list, so it is only
	 * reachable via the completion callback (freed there or below).
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
				  mgmt_remove_adv_monitor_complete);

	if (err) {
		mgmt_pending_free(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
5579
/* Completion callback for Read Local OOB Data: translates the controller
 * reply skb into a mgmt response, copying P-192 (and, with BR/EDR Secure
 * Connections, P-256) hash/randomizer values.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data,
					 int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Even on err == 0 the skb itself may signal failure: missing, an
	 * ERR_PTR, or a non-zero HCI status in its first byte.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		/* Legacy reply: P-192 values only. */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Trim the (zeroed) P-256 fields from the response. */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended reply: both P-192 and P-256 values. */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5648
read_local_oob_data_sync(struct hci_dev * hdev,void * data)5649 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5650 {
5651 struct mgmt_pending_cmd *cmd = data;
5652
5653 if (bredr_sc_enabled(hdev))
5654 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5655 else
5656 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5657
5658 if (IS_ERR(cmd->skb))
5659 return PTR_ERR(cmd->skb);
5660 else
5661 return 0;
5662 }
5663
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: requires a powered, SSP-capable
 * controller, then queues the HCI read via the hci_sync machinery.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Unlisted pending command; only the completion callback sees it. */
	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5705
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: stores remote OOB pairing data.
 * Two request sizes are accepted - the legacy P-192-only form (BR/EDR
 * only) and the extended form carrying both P-192 and P-256 values.
 * All-zero hash/randomizer pairs disable the corresponding key set.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy request: P-192 values only, BR/EDR addresses only. */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended request: P-192 and P-256 values. */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5813
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: drops stored remote OOB data
 * for one BR/EDR address, or for all addresses when BDADDR_ANY is given.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	/* BDADDR_ANY is a wildcard: clear every stored entry. */
	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
		goto done;
	}

	/* An unknown address is reported as invalid parameters. */
	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
5850
discovery_type_is_valid(struct hci_dev * hdev,uint8_t type,uint8_t * mgmt_status)5851 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5852 uint8_t *mgmt_status)
5853 {
5854 switch (type) {
5855 case DISCOV_TYPE_LE:
5856 *mgmt_status = mgmt_le_support(hdev);
5857 if (*mgmt_status)
5858 return false;
5859 break;
5860 case DISCOV_TYPE_INTERLEAVED:
5861 *mgmt_status = mgmt_le_support(hdev);
5862 if (*mgmt_status)
5863 return false;
5864 fallthrough;
5865 case DISCOV_TYPE_BREDR:
5866 *mgmt_status = mgmt_bredr_support(hdev);
5867 if (*mgmt_status)
5868 return false;
5869 break;
5870 default:
5871 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5872 return false;
5873 }
5874
5875 return true;
5876 }
5877
/* Completion callback for the Start Discovery family: replies with the
 * requested type (first byte of cmd->param) and moves the discovery state
 * machine to FINDING, or back to STOPPED on failure.
 */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Cancelled or already cleaned up by mgmt_index_removed. */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_free(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
				DISCOVERY_FINDING);
}
5894
start_discovery_sync(struct hci_dev * hdev,void * data)5895 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5896 {
5897 if (!mgmt_pending_listed(hdev, data))
5898 return -ECANCELED;
5899
5900 return hci_start_discovery_sync(hdev);
5901 }
5902
/* Shared handler for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY: validates device and discovery state,
 * records the requested parameters and queues the discovery start.
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject while already discovering or during periodic inquiry. */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	/* Limited discovery only reports devices in limited discoverable
	 * mode; the flag distinguishes the two ops downstream.
	 */
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5973
/* MGMT_OP_START_DISCOVERY: thin wrapper around the shared handler. */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5980
/* MGMT_OP_START_LIMITED_DISCOVERY: thin wrapper around the shared
 * handler; only devices in limited discoverable mode are reported.
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5988
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but with
 * result filtering on RSSI and an optional list of 128-bit service UUIDs
 * appended to the fixed-size request.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound so expected_len below cannot overflow u16. */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject while already discovering or during periodic inquiry. */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Total length must match the advertised UUID count exactly. */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
6100
/* Completion callback for Stop Discovery: replies with the requested type
 * (first byte of cmd->param) and, on success, marks discovery stopped.
 */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Cancelled or already cleaned up by mgmt_index_removed. */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_free(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
6117
stop_discovery_sync(struct hci_dev * hdev,void * data)6118 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6119 {
6120 if (!mgmt_pending_listed(hdev, data))
6121 return -ECANCELED;
6122
6123 return hci_stop_discovery_sync(hdev);
6124 }
6125
/* MGMT_OP_STOP_DISCOVERY handler: the requested type must match the
 * discovery currently running; the actual stop runs via hci_sync.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6170
/* MGMT_OP_CONFIRM_NAME handler: during discovery, userspace tells us
 * whether it already knows the remote name of an inquiry-cache entry, so
 * unneeded remote-name requests can be skipped.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		/* Name already known: drop it from the resolve list. */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		/* Name needed: keep it queued for name resolution. */
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
6212
/* MGMT_OP_BLOCK_DEVICE handler: adds the address to the reject list and
 * broadcasts MGMT_EV_DEVICE_BLOCKED on success.
 */
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status = MGMT_STATUS_FAILED;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err >= 0) {
		/* Notify listeners (skipping the requesting socket). */
		mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);
		status = MGMT_STATUS_SUCCESS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
6248
/* MGMT_OP_UNBLOCK_DEVICE handler: removes the address from the reject
 * list and broadcasts MGMT_EV_DEVICE_UNBLOCKED on success; an address not
 * on the list is reported as invalid parameters.
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err >= 0) {
		/* Notify listeners (skipping the requesting socket). */
		mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);
		status = MGMT_STATUS_SUCCESS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
6284
/* hci_sync work: regenerate the EIR so it carries the new Device ID. */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
6289
/* MGMT_OP_SET_DEVICE_ID handler: stores the DI record fields on hdev and
 * queues a best-effort EIR refresh (its result is deliberately ignored).
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	int err;
	__u16 source;

	bt_dev_dbg(hdev, "sock %p", sk);

	source = __le16_to_cpu(cp->source);

	/* Valid sources: 0x0000 disabled, 0x0001 SIG, 0x0002 USB-IF. */
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* Fire-and-forget: EIR update failure does not fail the command. */
	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);

	hci_dev_unlock(hdev);

	return err;
}
6321
/* Log the outcome of re-enabling instance advertising: failures at error
 * level, success only as a debug message.
 */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err)
		bt_dev_dbg(hdev, "status %d", err);
	else
		bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
6329
/* Completion callback for Set Advertising: syncs the HCI_ADVERTISING
 * flag with the controller state, replies with the new settings, and
 * re-enables multi-instance advertising if "Set Advertising" was just
 * turned off while instances still exist.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	/* Cancelled or already cleaned up by mgmt_index_removed. */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, data))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, status);
		mgmt_pending_free(cmd);
		return;
	}

	/* Mirror the actual LE advertising state into the mgmt flag. */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	/* settings_rsp() replies to cmd and frees it; match.sk collects
	 * the socket to skip in the New Settings broadcast.
	 */
	settings_rsp(cmd, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	/* No current instance: fall back to the first configured one. */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
6380
/* hci_sync work for Set Advertising: copies the requested mode out of the
 * pending command under mgmt_pending_lock (it may be freed concurrently),
 * then enables or disables advertising accordingly. Mode 0x02 also marks
 * advertising as connectable.
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode cp;
	u8 val;

	mutex_lock(&hdev->mgmt_pending_lock);

	if (!__mgmt_pending_listed(hdev, cmd)) {
		mutex_unlock(&hdev->mgmt_pending_lock);
		return -ECANCELED;
	}

	/* Copy the parameters so cmd is not touched after unlock. */
	memcpy(&cp, cmd->param, sizeof(cp));

	mutex_unlock(&hdev->mgmt_pending_lock);

	val = !!cp.val;

	if (cp.val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
6427
/* MGMT_OP_SET_ADVERTISING handler. cp->val is 0x00 (off), 0x01 (on) or
 * 0x02 (on and connectable). Either replies directly after toggling the
 * flags, or queues set_adv_sync() to talk to the controller.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Advertising is temporarily paused by another operation; don't
	 * change it from under that owner.
	 */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one Set Advertising / Set LE operation may be in flight */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6512
/* MGMT_OP_SET_STATIC_ADDRESS handler: configure the LE static random
 * address. Only allowed while the controller is powered off.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY clears the static address; anything else must be a
	 * well-formed static random address.
	 */
	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6556
/* MGMT_OP_SET_SCAN_PARAMS handler: set the LE scan interval/window.
 * Both values must be in the range 0x0004-0x4000 and the window must not
 * exceed the interval.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Keep allowed ranges in sync with set_mesh() */
	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED)
		hci_update_passive_scan(hdev);

	hci_dev_unlock(hdev);

	return err;
}
6606
/* hci_cmd_sync completion for Set Fast Connectable: sync the flag with
 * the outcome and notify user space.
 */
static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(err));
	} else {
		struct mgmt_mode *cp = cmd->param;

		/* Only update the flag once the controller accepted the mode */
		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
6630
write_fast_connectable_sync(struct hci_dev * hdev,void * data)6631 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6632 {
6633 struct mgmt_pending_cmd *cmd = data;
6634 struct mgmt_mode *cp = cmd->param;
6635
6636 return hci_write_fast_connectable_sync(hdev, cp->val);
6637 }
6638
/* MGMT_OP_SET_FAST_CONNECTABLE handler: toggle BR/EDR fast-connectable
 * page scan parameters. Requires BR/EDR and a >= 1.2 controller.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just reply with the current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* Powered off: flip the flag now, it is applied on power on */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6694
/* hci_cmd_sync completion for Set BR/EDR: on failure roll back the
 * optimistically-set HCI_BREDR_ENABLED flag, otherwise notify user space.
 */
static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
6717
set_bredr_sync(struct hci_dev * hdev,void * data)6718 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6719 {
6720 int status;
6721
6722 status = hci_write_fast_connectable_sync(hdev, false);
6723
6724 if (!status)
6725 status = hci_update_scan_sync(hdev);
6726
6727 /* Since only the advertising data flags will change, there
6728 * is no need to update the scan response data.
6729 */
6730 if (!status)
6731 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6732
6733 return status;
6734 }
6735
/* MGMT_OP_SET_BREDR handler: enable/disable BR/EDR on a dual-mode
 * controller. Disabling while powered is rejected, as is re-enabling
 * when a static address or secure connections make the configuration
 * invalid.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just reply with the current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6835
/* hci_cmd_sync completion for Set Secure Connections: translate the
 * requested mode (0x00 off, 0x01 on, 0x02 SC-only) into the SC flags
 * and notify user space.
 */
static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
}
6873
set_secure_conn_sync(struct hci_dev * hdev,void * data)6874 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6875 {
6876 struct mgmt_pending_cmd *cmd = data;
6877 struct mgmt_mode *cp = cmd->param;
6878 u8 val = !!cp->val;
6879
6880 /* Force write of val */
6881 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6882
6883 return hci_write_sc_support_sync(hdev, val);
6884 }
6885
/* MGMT_OP_SET_SECURE_CONN handler: configure Secure Connections mode
 * (0x00 off, 0x01 on, 0x02 SC-only). Replies directly when no HCI
 * traffic is needed, otherwise queues set_secure_conn_sync().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* With BR/EDR enabled, SC requires SSP to be enabled as well */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No controller interaction needed: just toggle the flags */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* No change requested: just reply with the current settings */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6966
/* MGMT_OP_SET_DEBUG_KEYS handler: 0x00 discard debug keys, 0x01 keep
 * them, 0x02 keep them and also put the controller into SSP debug mode.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Mode 0x02 additionally enables use of debug keys for pairing */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Tell a powered, SSP-enabled controller about the new debug mode */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7013
/* MGMT_OP_SET_PRIVACY handler: configure LE privacy and the local IRK.
 * cp->privacy is 0x00 (off), 0x01 (on) or 0x02 (limited privacy). Only
 * allowed while powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA on next use */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7070
irk_is_valid(struct mgmt_irk_info * irk)7071 static bool irk_is_valid(struct mgmt_irk_info *irk)
7072 {
7073 switch (irk->addr.type) {
7074 case BDADDR_LE_PUBLIC:
7075 return true;
7076
7077 case BDADDR_LE_RANDOM:
7078 /* Two most significant bits shall be set */
7079 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7080 return false;
7081 return true;
7082 }
7083
7084 return false;
7085 }
7086
/* MGMT_OP_LOAD_IRKS handler: replace the stored Identity Resolving Keys
 * with the list supplied by user space, skipping blocked keys.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps struct_size() below U16_MAX */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared key count */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate all entries before touching the stored keys */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7157
ltk_is_valid(struct mgmt_ltk_info * key)7158 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7159 {
7160 if (key->initiator != 0x00 && key->initiator != 0x01)
7161 return false;
7162
7163 switch (key->addr.type) {
7164 case BDADDR_LE_PUBLIC:
7165 return true;
7166
7167 case BDADDR_LE_RANDOM:
7168 /* Two most significant bits shall be set */
7169 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7170 return false;
7171 return true;
7172 }
7173
7174 return false;
7175 }
7176
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: replace the stored LTKs with the
 * list supplied by user space. Blocked, invalid and P256 debug keys are
 * skipped rather than failing the whole command.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps struct_size() below U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared key count */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		if (!ltk_is_valid(key)) {
			bt_dev_warn(hdev, "Invalid LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* P256 debug keys fall through into the default
			 * case and are therefore never added.
			 */
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
				NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7269
/* hci_cmd_sync completion for Get Connection Information: reply with the
 * freshly-read RSSI/TX-power values, or with invalid markers on failure.
 */
static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct mgmt_rp_get_conn_info rp;
	u8 status;

	bt_dev_dbg(hdev, "err %d", err);

	/* Echo the requested address back in the response */
	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	status = mgmt_status(err);
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_GET_CONN_INFO, status,
			  &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}
7298
/* hci_cmd_sync work for Get Connection Information: refresh RSSI and,
 * where still unknown, the (max) TX power for the connection.
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16 handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Stash the connection for the completion callback */
	cmd->user_data = conn;
	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
7336
/* MGMT_OP_GET_CONN_INFO handler: return RSSI/TX-power for a connection,
 * either from the cache (if recent enough) or by querying the controller.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
						 hdev->conn_info_max_age - 1);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7427
/* hci_cmd_sync completion for Get Clock Information: reply with the local
 * clock and, if a connection was involved, the piconet clock values.
 */
static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Echo the requested address back in the response */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* On failure reply with a zeroed (address-only) response */
	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}
7458
/* hci_cmd_sync work for Get Clock Information: read the local clock
 * (best effort) and then the piconet clock of the target connection.
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn;

	/* Zeroed request: which = 0x00, i.e. the local clock; the return
	 * value is deliberately ignored (best effort).
	 */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_read_clock_sync(hdev, &hci_cp);

	/* Make sure connection still exists */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Stash the connection for the completion callback */
	cmd->user_data = conn;
	hci_cp.handle = cpu_to_le16(conn->handle);
	hci_cp.which = 0x01; /* Piconet clock */

	return hci_read_clock_sync(hdev, &hci_cp);
}
7480
/* MGMT_OP_GET_CLOCK_INFO handler: report the local clock and, for a
 * connected BR/EDR peer, the piconet clock and accuracy.
 *
 * The HCI reads run asynchronously in get_clock_info_sync(); the reply
 * is sent from get_clock_info_complete().  Error replies are sent
 * directly from here.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always carries the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only defined for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-ANY address names a specific peer whose piconet clock is
	 * wanted; BDADDR_ANY asks for the local clock only.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		/* cmd is NULL when the allocation above failed */
		if (cmd)
			mgmt_pending_free(cmd);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
7544
is_connected(struct hci_dev * hdev,bdaddr_t * addr,u8 type)7545 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7546 {
7547 struct hci_conn *conn;
7548
7549 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7550 if (!conn)
7551 return false;
7552
7553 if (conn->dst_type != type)
7554 return false;
7555
7556 if (conn->state != BT_CONNECTED)
7557 return false;
7558
7559 return true;
7560 }
7561
/* This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	/* Creates the params entry if it does not exist yet */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Unlink from whichever pending list it is currently on before
	 * re-filing it according to the new auto_connect mode below.
	 */
	hci_pend_le_list_del_init(params);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-progress explicit connect takes precedence over
		 * passive reporting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		else
			hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connect attempt if there is no established
		 * LE link to this address yet.
		 */
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
7606
/* Broadcast MGMT_EV_DEVICE_ADDED for the given address and action;
 * sk is handed to mgmt_event() unchanged, matching the other event
 * emitters in this file.
 */
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev = {
		.action = action,
	};

	ev.addr.type = type;
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}
7618
/* Completion handler for MGMT_OP_ADD_DEVICE: on success announce the
 * new device and its current connection flags, then send the command
 * reply and release the pending command.
 */
static void add_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_device *cp = cmd->param;

	if (!err) {
		struct hci_conn_params *params;

		/* May be NULL (e.g. for BR/EDR entries); flags fall back
		 * to 0 below in that case.
		 */
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));

		device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
			     cp->action);
		device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
				     cp->addr.type, hdev->conn_flags,
				     params ? params->flags : 0);
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
			  mgmt_status(err), &cp->addr, sizeof(cp->addr));
	mgmt_pending_free(cmd);
}
7641
/* hci_sync callback for MGMT_OP_ADD_DEVICE: the accept list / conn
 * params were already updated under hdev->lock, so only the passive
 * scan needs to be re-evaluated here.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7646
/* MGMT_OP_ADD_DEVICE handler: whitelist a BR/EDR device for incoming
 * connections (action 0x01) or set up LE auto-connect/report policy
 * (actions 0x00-0x02 mapped to HCI_AUTO_CONN_* below).
 *
 * BR/EDR entries complete synchronously; LE entries queue
 * add_device_sync() and reply from add_device_complete().
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Reject unknown address types and the wildcard address */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_update_scan(hdev);

		/* BR/EDR path replies synchronously */
		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the internal auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
				 add_device_complete);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		mgmt_pending_free(cmd);
	}

	/* LE path: the success reply comes from add_device_complete() */
	goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7762
/* Broadcast MGMT_EV_DEVICE_REMOVED for the given address; sk is handed
 * to mgmt_event() unchanged, matching the other event emitters here.
 */
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev = {
		.addr.type = type,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}
7773
/* hci_sync callback for MGMT_OP_REMOVE_DEVICE: the lists were already
 * updated under hdev->lock, so only re-evaluate the passive scan.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7778
/* MGMT_OP_REMOVE_DEVICE handler: remove one device (specific address)
 * or, with BDADDR_ANY and type 0, flush the whole accept list and all
 * removable LE connection parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Single-device removal */
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				/* Entry was not on the accept list */
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			/* BR/EDR needs no passive-scan update */
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* DISABLED/EXPLICIT entries were not created via Add
		 * Device, so refuse to remove them through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_free(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* Wildcard removal: flush everything removable */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY must come with address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with a connect attempt in flight */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			hci_conn_params_free(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7902
conn_update_sync(struct hci_dev * hdev,void * data)7903 static int conn_update_sync(struct hci_dev *hdev, void *data)
7904 {
7905 struct hci_conn_params *params = data;
7906 struct hci_conn *conn;
7907
7908 conn = hci_conn_hash_lookup_le(hdev, ¶ms->addr, params->addr_type);
7909 if (!conn)
7910 return -ECANCELED;
7911
7912 return hci_le_conn_update_sync(hdev, conn, params);
7913 }
7914
/* MGMT_OP_LOAD_CONN_PARAM handler: bulk-load LE connection parameters.
 * Invalid entries are skipped with an error message rather than failing
 * the whole command.  A single-entry load for an already-known device
 * may additionally trigger a live connection update.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound so that struct_size() below cannot overflow u16 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared entry count exactly */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	/* A multi-entry load replaces disabled entries wholesale; the
	 * single-entry case decides this per-device inside the loop.
	 */
	if (param_count > 1)
		hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		bool update = false;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		/* Detect when the loading is for an existing parameter then
		 * attempt to trigger the connection update procedure.
		 */
		if (!i && param_count == 1) {
			hci_param = hci_conn_params_lookup(hdev,
							   &param->addr.bdaddr,
							   addr_type);
			if (hci_param)
				update = true;
			else
				hci_conn_params_clear_disabled(hdev);
		}

		/* Looks up the existing entry or creates a new one */
		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;

		/* Check if we need to trigger a connection update */
		if (update) {
			struct hci_conn *conn;

			/* Lookup for existing connection as central and check
			 * if parameters match and if they don't then trigger
			 * a connection update.
			 */
			conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
						       addr_type);
			if (conn && conn->role == HCI_ROLE_MASTER &&
			    (conn->le_conn_min_interval != min ||
			     conn->le_conn_max_interval != max ||
			     conn->le_conn_latency != latency ||
			     conn->le_supv_timeout != timeout))
				hci_cmd_sync_queue(hdev, conn_update_sync,
						   hci_param, NULL);
		}
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
8033
/* MGMT_OP_SET_EXTERNAL_CONFIG handler: toggle the HCI_EXT_CONFIGURED
 * flag on a controller that declares HCI_QUIRK_EXTERNAL_CONFIG, and
 * move the controller between the configured and unconfigured index
 * lists if its overall configuration state changed as a result.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only allowed while the controller is down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* changed is true only if the flag actually flipped */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the UNCONFIGURED flag no longer matches the actual
	 * configuration state, re-register the index on the right list.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8089
/* MGMT_OP_SET_PUBLIC_ADDRESS handler: record a public address to be
 * programmed via the driver's set_bdaddr hook, and if that completes
 * the controller's configuration, kick off the configured power-on
 * sequence.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only allowed while the controller is down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Requires driver support for programming the address */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* Setting the address may have been the missing piece: if the
	 * controller is now fully configured, move it to the configured
	 * index list and power it on.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8141
/* Completion handler for the BR/EDR SSP branch of
 * MGMT_OP_READ_LOCAL_OOB_EXT_DATA: parse the controller's Read Local
 * OOB (Extended) Data reply from cmd->skb, build the EIR-formatted
 * response, send it and broadcast MGMT_EV_LOCAL_OOB_DATA_UPDATED.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* If the sync request itself succeeded, derive the status from
	 * the skb: missing, error-valued, or the HCI status byte.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		/* NOTE(review): status already went through mgmt_status()
		 * above; this second mapping looks redundant — verify it is
		 * a no-op for already-mapped values.
		 */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy SSP: only the P-192 hash/randomizer pair */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class-of-dev (5) + hash (18) + rand (18) fields */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Secure Connections: P-256 pair, plus P-192 unless the
		 * controller is in SC-only mode.
		 */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Subscribe the requester to future OOB data updates and tell
	 * the other subscribers about the fresh data.
	 */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_free(cmd);
}
8261
read_local_ssp_oob_req(struct hci_dev * hdev,struct sock * sk,struct mgmt_cp_read_local_oob_ext_data * cp)8262 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8263 struct mgmt_cp_read_local_oob_ext_data *cp)
8264 {
8265 struct mgmt_pending_cmd *cmd;
8266 int err;
8267
8268 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8269 cp, sizeof(*cp));
8270 if (!cmd)
8271 return -ENOMEM;
8272
8273 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8274 read_local_oob_ext_data_complete);
8275
8276 if (err < 0) {
8277 mgmt_pending_remove(cmd);
8278 return err;
8279 }
8280
8281 return 0;
8282 }
8283
/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler: return EIR-formatted local
 * out-of-band pairing data for either BR/EDR or LE.  The BR/EDR+SSP
 * case defers to read_local_ssp_oob_req() and replies asynchronously;
 * every other case replies directly from here.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: determine the status and the worst-case EIR size
	 * so the reply buffer can be allocated up front.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: actually build the EIR data */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* SSP data must come from the controller; the
			 * reply is sent asynchronously on success.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick the address the peer will see: static random
		 * (trailing 0x01) or public (trailing 0x00).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* Role: 0x02 while advertising, 0x01 otherwise */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* Subscribe the requester to future OOB data updates */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
8444
get_supported_adv_flags(struct hci_dev * hdev)8445 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8446 {
8447 u32 flags = 0;
8448
8449 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8450 flags |= MGMT_ADV_FLAG_DISCOV;
8451 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8452 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8453 flags |= MGMT_ADV_FLAG_APPEARANCE;
8454 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8455 flags |= MGMT_ADV_PARAM_DURATION;
8456 flags |= MGMT_ADV_PARAM_TIMEOUT;
8457 flags |= MGMT_ADV_PARAM_INTERVALS;
8458 flags |= MGMT_ADV_PARAM_TX_POWER;
8459 flags |= MGMT_ADV_PARAM_SCAN_RSP;
8460
8461 /* In extended adv TX_POWER returned from Set Adv Param
8462 * will be always valid.
8463 */
8464 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8465 flags |= MGMT_ADV_FLAG_TX_POWER;
8466
8467 if (ext_adv_capable(hdev)) {
8468 flags |= MGMT_ADV_FLAG_SEC_1M;
8469 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8470 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8471
8472 if (le_2m_capable(hdev))
8473 flags |= MGMT_ADV_FLAG_SEC_2M;
8474
8475 if (le_coded_capable(hdev))
8476 flags |= MGMT_ADV_FLAG_SEC_CODED;
8477 }
8478
8479 return flags;
8480 }
8481
/* MGMT_OP_READ_ADV_FEATURES handler: report supported advertising
 * flags, size limits and the list of configured advertising instances.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Worst case: one trailing byte per configured instance */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = max_adv_len(hdev);
	rp->max_scan_rsp_len = max_adv_len(hdev);
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	/* Fill the trailing instance-number array, shrinking the reply
	 * for instances that are filtered out.
	 */
	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
8536
/* Number of bytes the local name occupies when encoded as an EIR
 * field, used to reserve space in scan response data.
 */
static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */

	return eir_append_local_name(hdev, buf, 0);
}
8543
tlv_data_max_len(struct hci_dev * hdev,u32 adv_flags,bool is_adv_data)8544 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8545 bool is_adv_data)
8546 {
8547 u8 max_len = max_adv_len(hdev);
8548
8549 if (is_adv_data) {
8550 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8551 MGMT_ADV_FLAG_LIMITED_DISCOV |
8552 MGMT_ADV_FLAG_MANAGED_FLAGS))
8553 max_len -= 3;
8554
8555 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8556 max_len -= 3;
8557 } else {
8558 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8559 max_len -= calculate_name_len(hdev);
8560
8561 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8562 max_len -= 4;
8563 }
8564
8565 return max_len;
8566 }
8567
flags_managed(u32 adv_flags)8568 static bool flags_managed(u32 adv_flags)
8569 {
8570 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8571 MGMT_ADV_FLAG_LIMITED_DISCOV |
8572 MGMT_ADV_FLAG_MANAGED_FLAGS);
8573 }
8574
tx_power_managed(u32 adv_flags)8575 static bool tx_power_managed(u32 adv_flags)
8576 {
8577 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8578 }
8579
name_managed(u32 adv_flags)8580 static bool name_managed(u32 adv_flags)
8581 {
8582 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8583 }
8584
appearance_managed(u32 adv_flags)8585 static bool appearance_managed(u32 adv_flags)
8586 {
8587 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8588 }
8589
/* Validate user-supplied advertising / scan response TLV data.
 *
 * Rejects data that exceeds the space left by kernel-managed fields,
 * any field whose length runs past the buffer, and any field type that
 * duplicates one the kernel manages itself (flags, TX power, local
 * name, appearance).
 */
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		if (!cur_len)
			continue;

		/* If the current field length would exceed the total data
		 * length, then it's invalid. Checked before the type byte
		 * below is read, so data[i + 1] is never fetched from past
		 * the end of the validated buffer.
		 */
		if (i + cur_len >= len)
			return false;

		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;
	}

	return true;
}
8634
requested_adv_flags_are_valid(struct hci_dev * hdev,u32 adv_flags)8635 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8636 {
8637 u32 supported_flags, phy_flags;
8638
8639 /* The current implementation only supports a subset of the specified
8640 * flags. Also need to check mutual exclusiveness of sec flags.
8641 */
8642 supported_flags = get_supported_adv_flags(hdev);
8643 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8644 if (adv_flags & ~supported_flags ||
8645 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8646 return false;
8647
8648 return true;
8649 }
8650
adv_busy(struct hci_dev * hdev)8651 static bool adv_busy(struct hci_dev *hdev)
8652 {
8653 return pending_find(MGMT_OP_SET_LE, hdev);
8654 }
8655
/* Finalize pending advertising instances after an add operation.
 *
 * On success every pending instance is simply marked live; on failure
 * every pending instance is removed and an Advertising Removed event
 * is emitted for it.
 *
 * NOTE(review): the @instance parameter is currently unused — all
 * pending instances are walked regardless. Kept for the callers'
 * signature. The loop-local counter was renamed so it no longer
 * shadows the parameter (-Wshadow).
 */
static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
			     int err)
{
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 adv_instance;

		if (!adv->pending)
			continue;

		if (!err) {
			adv->pending = false;
			continue;
		}

		adv_instance = adv->instance;

		/* Stop the rotation timer if we remove the live instance */
		if (hdev->cur_adv_instance == adv_instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, adv_instance);
		mgmt_advertising_removed(sk, hdev, adv_instance);
	}

	hci_dev_unlock(hdev);
}
8687
add_advertising_complete(struct hci_dev * hdev,void * data,int err)8688 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8689 {
8690 struct mgmt_pending_cmd *cmd = data;
8691 struct mgmt_cp_add_advertising *cp = cmd->param;
8692 struct mgmt_rp_add_advertising rp;
8693
8694 memset(&rp, 0, sizeof(rp));
8695
8696 rp.instance = cp->instance;
8697
8698 if (err)
8699 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
8700 mgmt_status(err));
8701 else
8702 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
8703 mgmt_status(err), &rp, sizeof(rp));
8704
8705 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8706
8707 mgmt_pending_free(cmd);
8708 }
8709
add_advertising_sync(struct hci_dev * hdev,void * data)8710 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8711 {
8712 struct mgmt_pending_cmd *cmd = data;
8713 struct mgmt_cp_add_advertising *cp = cmd->param;
8714
8715 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8716 }
8717
/* MGMT_OP_ADD_ADVERTISING handler.
 *
 * Validates the requested instance number, flags and TLV payload,
 * registers (or replaces) the advertising instance and, when HCI
 * traffic is needed, queues the work to start advertising it. Replies
 * immediately when no HCI communication is required (powered off,
 * HCI_ADVERTISING set, or nothing to schedule).
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Valid instance numbers are 1..le_num_of_adv_sets */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Payload length must exactly cover adv data plus scan rsp data */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout only makes sense on a powered controller */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Adv data and scan rsp data follow each other in cp->data */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* The sync callback starts whichever instance was chosen above */
	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8852
add_ext_adv_params_complete(struct hci_dev * hdev,void * data,int err)8853 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8854 int err)
8855 {
8856 struct mgmt_pending_cmd *cmd = data;
8857 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8858 struct mgmt_rp_add_ext_adv_params rp;
8859 struct adv_info *adv;
8860 u32 flags;
8861
8862 BT_DBG("%s", hdev->name);
8863
8864 hci_dev_lock(hdev);
8865
8866 adv = hci_find_adv_instance(hdev, cp->instance);
8867 if (!adv)
8868 goto unlock;
8869
8870 rp.instance = cp->instance;
8871 rp.tx_power = adv->tx_power;
8872
8873 /* While we're at it, inform userspace of the available space for this
8874 * advertisement, given the flags that will be used.
8875 */
8876 flags = __le32_to_cpu(cp->flags);
8877 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8878 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8879
8880 if (err) {
8881 /* If this advertisement was previously advertising and we
8882 * failed to update it, we signal that it has been removed and
8883 * delete its structure
8884 */
8885 if (!adv->pending)
8886 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8887
8888 hci_remove_adv_instance(hdev, cp->instance);
8889
8890 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
8891 mgmt_status(err));
8892 } else {
8893 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
8894 mgmt_status(err), &rp, sizeof(rp));
8895 }
8896
8897 unlock:
8898 mgmt_pending_free(cmd);
8899
8900 hci_dev_unlock(hdev);
8901 }
8902
add_ext_adv_params_sync(struct hci_dev * hdev,void * data)8903 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8904 {
8905 struct mgmt_pending_cmd *cmd = data;
8906 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8907
8908 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8909 }
8910
/* MGMT_OP_ADD_EXT_ADV_PARAMS handler.
 *
 * First half of the two-stage extended advertising registration:
 * validates the requested instance and flags, creates the instance
 * with parameters only (no data) and, on controllers with extended
 * advertising support, queues the HCI work to program those
 * parameters. The data arrives later via MGMT_OP_ADD_EXT_ADV_DATA.
 */
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	/* Valid instance numbers are 1..le_num_of_adv_sets */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			/* Roll back the instance created above */
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		/* No extended advertising: nothing to program, reply now */
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9026
add_ext_adv_data_complete(struct hci_dev * hdev,void * data,int err)9027 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
9028 {
9029 struct mgmt_pending_cmd *cmd = data;
9030 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
9031 struct mgmt_rp_add_advertising rp;
9032
9033 add_adv_complete(hdev, cmd->sk, cp->instance, err);
9034
9035 memset(&rp, 0, sizeof(rp));
9036
9037 rp.instance = cp->instance;
9038
9039 if (err)
9040 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
9041 mgmt_status(err));
9042 else
9043 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
9044 mgmt_status(err), &rp, sizeof(rp));
9045
9046 mgmt_pending_free(cmd);
9047 }
9048
add_ext_adv_data_sync(struct hci_dev * hdev,void * data)9049 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
9050 {
9051 struct mgmt_pending_cmd *cmd = data;
9052 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
9053 int err;
9054
9055 if (ext_adv_capable(hdev)) {
9056 err = hci_update_adv_data_sync(hdev, cp->instance);
9057 if (err)
9058 return err;
9059
9060 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
9061 if (err)
9062 return err;
9063
9064 return hci_enable_ext_advertising_sync(hdev, cp->instance);
9065 }
9066
9067 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
9068 }
9069
/* MGMT_OP_ADD_EXT_ADV_DATA handler.
 *
 * Second half of the two-stage registration: attaches advertising and
 * scan response data to an instance previously created via
 * MGMT_OP_ADD_EXT_ADV_PARAMS and schedules it on the controller. On
 * any failure the half-created instance is removed again.
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance must have been created by Add Ext Adv Params */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9188
remove_advertising_complete(struct hci_dev * hdev,void * data,int err)9189 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9190 int err)
9191 {
9192 struct mgmt_pending_cmd *cmd = data;
9193 struct mgmt_cp_remove_advertising *cp = cmd->param;
9194 struct mgmt_rp_remove_advertising rp;
9195
9196 bt_dev_dbg(hdev, "err %d", err);
9197
9198 memset(&rp, 0, sizeof(rp));
9199 rp.instance = cp->instance;
9200
9201 if (err)
9202 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
9203 mgmt_status(err));
9204 else
9205 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
9206 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9207
9208 mgmt_pending_free(cmd);
9209 }
9210
remove_advertising_sync(struct hci_dev * hdev,void * data)9211 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9212 {
9213 struct mgmt_pending_cmd *cmd = data;
9214 struct mgmt_cp_remove_advertising *cp = cmd->param;
9215 int err;
9216
9217 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9218 if (err)
9219 return err;
9220
9221 if (list_empty(&hdev->adv_instances))
9222 err = hci_disable_advertising_sync(hdev);
9223
9224 return err;
9225 }
9226
/* MGMT_OP_REMOVE_ADVERTISING handler.
 *
 * A non-zero instance must refer to an existing advertising set; an
 * instance of 0 skips that lookup (presumably "remove all" — confirm
 * against hci_remove_advertising_sync / mgmt-api). The actual removal
 * happens asynchronously in remove_advertising_sync.
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Don't race with a pending Set LE operation */
	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing registered, so nothing to remove */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9274
/* MGMT_OP_GET_ADV_SIZE_INFO handler: report how much advertising and
 * scan response space remains for a given instance and flag set.
 */
static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	/* Valid instance numbers are 1..le_num_of_adv_sets */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	/* The current implementation only supports a subset of the
	 * specified flags.
	 */
	if (flags & ~get_supported_adv_flags(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}
9310
/* Command dispatch table, indexed by mgmt opcode starting at 0x0000.
 * Each entry gives the handler, the fixed (or, with HCI_MGMT_VAR_LEN,
 * minimum) parameter size, and flags such as HCI_MGMT_NO_HDEV (no
 * controller index required) and HCI_MGMT_UNTRUSTED (allowed on
 * untrusted sockets).
 * NOTE(review): entry position appears to encode the opcode value —
 * keep new entries appended in opcode order; verify against mgmt.h.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,         MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,      MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,          MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,        MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,     MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
	{ mgmt_hci_cmd_sync,       MGMT_HCI_CMD_SYNC_SIZE, HCI_MGMT_VAR_LEN },
};
9445
mgmt_index_added(struct hci_dev * hdev)9446 void mgmt_index_added(struct hci_dev *hdev)
9447 {
9448 struct mgmt_ev_ext_index ev;
9449
9450 if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
9451 return;
9452
9453 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9454 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
9455 HCI_MGMT_UNCONF_INDEX_EVENTS);
9456 ev.type = 0x01;
9457 } else {
9458 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9459 HCI_MGMT_INDEX_EVENTS);
9460 ev.type = 0x00;
9461 }
9462
9463 ev.bus = hdev->bus;
9464
9465 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9466 HCI_MGMT_EXT_INDEX_EVENTS);
9467 }
9468
mgmt_index_removed(struct hci_dev * hdev)9469 void mgmt_index_removed(struct hci_dev *hdev)
9470 {
9471 struct mgmt_ev_ext_index ev;
9472 struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };
9473
9474 if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
9475 return;
9476
9477 mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);
9478
9479 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9480 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
9481 HCI_MGMT_UNCONF_INDEX_EVENTS);
9482 ev.type = 0x01;
9483 } else {
9484 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9485 HCI_MGMT_INDEX_EVENTS);
9486 ev.type = 0x00;
9487 }
9488
9489 ev.bus = hdev->bus;
9490
9491 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9492 HCI_MGMT_EXT_INDEX_EVENTS);
9493
9494 /* Cancel any remaining timed work */
9495 if (!hci_dev_test_flag(hdev, HCI_MGMT))
9496 return;
9497 cancel_delayed_work_sync(&hdev->discov_off);
9498 cancel_delayed_work_sync(&hdev->service_cache);
9499 cancel_delayed_work_sync(&hdev->rpa_expired);
9500 }
9501
/* Called when a power-on attempt has finished.
 *
 * On success, re-arms stored LE connection actions and refreshes
 * passive scanning. In all cases, answers every pending Set Powered
 * command and emits a New Settings event.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	/* settings_rsp may record a socket in match.sk; released below */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
9525
/* Controller has been (or is being) powered off: complete pending
 * commands with an appropriate failure status, clear the advertised
 * class of device, and broadcast the new settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
			     &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
	else
		match.mgmt_status = MGMT_STATUS_NOT_POWERED;

	/* Fail all remaining pending commands with the chosen status */
	mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);

	/* Only announce a class-of-device change if it was non-zero */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9560
mgmt_set_powered_failed(struct hci_dev * hdev,int err)9561 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9562 {
9563 struct mgmt_pending_cmd *cmd;
9564 u8 status;
9565
9566 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9567 if (!cmd)
9568 return;
9569
9570 if (err == -ERFKILL)
9571 status = MGMT_STATUS_RFKILLED;
9572 else
9573 status = MGMT_STATUS_FAILED;
9574
9575 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9576
9577 mgmt_pending_remove(cmd);
9578 }
9579
/* Forward a newly created BR/EDR link key to userspace. The store_hint
 * tells userspace whether the key should be kept persistently.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	/* Link keys only exist on BR/EDR transports */
	ev.key.addr.type = BDADDR_BREDR;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);

	ev.key.pin_len = key->pin_len;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
9596
mgmt_ltk_type(struct smp_ltk * ltk)9597 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9598 {
9599 switch (ltk->type) {
9600 case SMP_LTK:
9601 case SMP_LTK_RESPONDER:
9602 if (ltk->authenticated)
9603 return MGMT_LTK_AUTHENTICATED;
9604 return MGMT_LTK_UNAUTHENTICATED;
9605 case SMP_LTK_P256:
9606 if (ltk->authenticated)
9607 return MGMT_LTK_P256_AUTH;
9608 return MGMT_LTK_P256_UNAUTH;
9609 case SMP_LTK_P256_DEBUG:
9610 return MGMT_LTK_P256_DEBUG;
9611 }
9612
9613 return MGMT_LTK_UNAUTHENTICATED;
9614 }
9615
/* Forward a new LE long term key to userspace, deciding whether it is
 * worth storing based on the remote address type.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK marks the key created while we were the initiator */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
9658
/* Forward a new Identity Resolving Key, together with the resolvable
 * private address it was seen with, to userspace.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.rpa, &irk->rpa);

	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
9674
/* Forward a new Connection Signature Resolving Key to userspace,
 * suppressing the store hint for non-identity random addresses.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
9704
/* Emit a New Connection Parameter event so userspace can decide
 * whether to store the parameters. Non-identity addresses are ignored
 * since parameters cannot be persisted against them.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;

	/* All 16-bit fields go out little-endian on the wire */
	ev.timeout = cpu_to_le16(timeout);
	ev.latency = cpu_to_le16(latency);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.min_interval = cpu_to_le16(min_interval);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
9725
/* Emit a Device Connected event for @conn, carrying either the cached
 * LE advertising data or (for BR/EDR) the remote name and class of
 * device as EIR fields. Emitted at most once per connection, guarded
 * by the HCI_CONN_MGMT_CONNECTED flag.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	/* Only notify once; further calls for the same conn are no-ops */
	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* allocate buff for LE or BR/EDR adv */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	/* conn->out set means we initiated the connection */
	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);

		/* Only append the class of device if it is non-zero */
		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class, sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}
9778
/* mgmt_pending_foreach() callback: emit the Device Unpaired event for
 * the address in the pending Unpair Device command and complete the
 * command with success.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
}
9788
mgmt_powering_down(struct hci_dev * hdev)9789 bool mgmt_powering_down(struct hci_dev *hdev)
9790 {
9791 struct mgmt_pending_cmd *cmd;
9792 struct mgmt_mode *cp;
9793
9794 if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
9795 return true;
9796
9797 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9798 if (!cmd)
9799 return false;
9800
9801 cp = cmd->param;
9802 if (!cp->val)
9803 return true;
9804
9805 return false;
9806 }
9807
/* Emit a Device Disconnected event, but only for connections that were
 * previously announced via Device Connected (@mgmt_connected) and only
 * for ACL/LE/BIS links.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	/* NOTE(review): sk is never assigned in this function, so the
	 * event is always broadcast and the sock_put() below is dead
	 * code — looks like a leftover from an earlier design.
	 */
	struct sock *sk = NULL;

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK &&
	    link_type != LE_LINK &&
	    link_type != BIS_LINK)
		return;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);
}
9836
/* A Disconnect attempt failed: complete any pending Unpair Device
 * commands and, if a pending Disconnect command matches this address,
 * complete it with the translated HCI status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, true,
			     unpair_device_rsp, hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only complete the command if it targets this exact device */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9862
/* Report a failed connection attempt. If the connection had already
 * been announced to mgmt as connected, a Device Disconnected event is
 * emitted instead of Connect Failed so userspace state stays coherent.
 */
void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
					 conn->dst_type, status, true);
		return;
	}

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
9879
/* Ask userspace for a PIN code for a legacy BR/EDR pairing attempt.
 * @secure indicates that a 16-digit PIN is required.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	ev.secure = secure;
	ev.addr.type = BDADDR_BREDR;
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
9890
/* Complete a pending PIN Code Reply command with the controller's
 * status once the HCI command has finished.
 */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd = pending_find(MGMT_OP_PIN_CODE_REPLY,
						    hdev);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9903
/* Complete a pending PIN Code Negative Reply command with the
 * controller's status once the HCI command has finished.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY,
						    hdev);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9916
/* Ask userspace to confirm a passkey value for SSP numeric comparison.
 * Returns the result of queueing the event.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	ev.value = cpu_to_le32(value);
	ev.confirm_hint = confirm_hint;
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9933
/* Ask userspace to enter a passkey for the given remote device.
 * Returns the result of queueing the event.
 */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9947
/* Common completion helper for the user confirm/passkey reply family:
 * finish the pending command for @opcode with the translated HCI
 * status. Returns -ENOENT if no such command is pending.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd = pending_find(opcode, hdev);

	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}
9963
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9970
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9978
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9985
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9993
/* Tell userspace which passkey is being displayed/entered during SSP
 * passkey entry. @entered counts the digits typed so far.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	ev.entered = entered;
	ev.passkey = __cpu_to_le32(passkey);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
10009
/* Authentication for @conn failed: emit Auth Failed (skipping the
 * socket of any pending pairing command, which instead gets its
 * command completed with the same status).
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* The pairing initiator is notified via its command response,
	 * so skip its socket for the broadcast event.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
10030
/* Write Authentication Enable completed: sync the HCI_LINK_SECURITY
 * flag with the controller state, answer pending Set Link Security
 * commands, and broadcast new settings if the flag actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		/* On failure just fail the pending commands */
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* test_and_set/clear report whether the flag value changed */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
10057
sk_lookup(struct mgmt_pending_cmd * cmd,void * data)10058 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
10059 {
10060 struct cmd_lookup *match = data;
10061
10062 if (match->sk == NULL) {
10063 match->sk = cmd->sk;
10064 sock_hold(match->sk);
10065 }
10066 }
10067
/* Class of Device update finished: find the socket that triggered it
 * (via Set Dev Class, Add UUID or Remove UUID) and, on success,
 * broadcast the new class to everyone else.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Any of these three commands may have caused the change */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, false, sk_lookup,
			     &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, false, sk_lookup,
			     &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, false, sk_lookup,
			     &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
10089
/* Local name update finished on the controller. On success, store the
 * name (if the change did not come from a mgmt command) and broadcast
 * Local Name Changed — except during power-on/off transitions.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No mgmt command pending: the name change originated in
		 * the kernel, so keep hdev->dev_name in sync.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering the HCI
		 * dev on or off don't send any mgmt signals.
		 */
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
			return;

		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
10120
/* Return true if the 128-bit @uuid appears in the @uuids list. */
static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	u16 idx = 0;

	while (idx < uuid_count) {
		if (memcmp(uuid, uuids[idx], 16) == 0)
			return true;
		idx++;
	}

	return false;
}
10132
/* Walk the EIR/advertising data in @eir and return true if any UUID
 * field (16-, 32- or 128-bit) contains a UUID from @uuids. 16- and
 * 32-bit UUIDs are expanded into the Bluetooth base UUID before
 * comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];	/* length of type byte + payload */
		u8 uuid[16];
		int i;

		/* A zero length field terminates the EIR data */
		if (field_len == 0)
			break;

		/* Stop if the field claims more bytes than remain */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Expand each 16-bit UUID into bytes 12-13 of the
			 * base UUID (stored little-endian in the EIR).
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 32-bit UUIDs occupy bytes 12-15 of the base UUID */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* 128-bit UUIDs are compared as-is */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + field_len bytes) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
10187
/* Apply the active service-discovery filter (RSSI threshold and UUID
 * list) to a found device. Returns false if the result should be
 * dropped.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER)) {
		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
10230
/* Tell userspace that a device previously matched by advertisement
 * monitor @handle is no longer being seen.
 */
void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;
	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}
10243
/* Re-emit an already-built DEVICE_FOUND skb as an
 * ADV_MONITOR_DEVICE_FOUND event, prefixing it with the handle of the
 * monitor that matched. The original @skb is left untouched.
 */
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	/* Same payload as DEVICE_FOUND plus the leading monitor_handle */
	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}
10273
/* Route a found-device skb to DEVICE_FOUND and/or
 * ADV_MONITOR_DEVICE_FOUND listeners, tracking per-device "already
 * notified" state for the monitored-devices list. Consumes @skb.
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	/* Recomputed below: stays false only if every monitored device
	 * has already been notified.
	 */
	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			/* First sighting of this monitored device */
			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	/* Either hand the skb to DEVICE_FOUND listeners or free it */
	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
10337
/* Emit a Mesh Device Found event, but only if the advertisement or
 * scan response contains at least one AD type from the configured
 * hdev->mesh_ad_types filter (an empty filter accepts everything).
 */
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	/* No filter configured: accept everything */
	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		/* Outer loop walks AD structures: eir[i] is the length,
		 * eir[i + 1] the AD type.
		 */
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				/* A zero entry terminates the filter list */
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	/* No requested AD type present: drop silently */
	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
10403
/* Central entry point for inquiry/advertising results: applies mesh
 * forwarding, discovery/passive-scan gating, service-discovery
 * filtering and limited-discovery checks, then builds and dispatches a
 * Device Found event.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	/* Mesh mode gets its own (additional) event stream for LE */
	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Synthesize a CoD EIR field when we have a class but the EIR
	 * data doesn't already carry one.
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	/* Dispatch via the adv-monitor router, which consumes the skb */
	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
10495
/* mgmt_remote_name - report the result of a remote name request as an
 * MGMT_EV_DEVICE_FOUND event. A NULL @name marks the request as failed
 * via MGMT_DEV_FOUND_NAME_REQUEST_FAILED; otherwise the name is carried
 * as an EIR_NAME_COMPLETE field in the event's EIR data.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	struct sk_buff *skb;
	u16 name_ad_len = 0;
	u32 ev_flags = 0;

	/* Reserve room for the name AD structure only when we have one */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (!name)
		ev_flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
	else
		name_ad_len = eir_skb_put_data(skb, EIR_NAME_COMPLETE, name,
					       name_len);

	ev->eir_len = cpu_to_le16(name_ad_len);
	ev->flags = cpu_to_le32(ev_flags);

	mgmt_event_skb(skb, NULL);
}
10524
/* mgmt_discovering - notify userspace that discovery started or stopped
 * by sending MGMT_EV_DISCOVERING with the current discovery type.
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev = {
		.type = hdev->discovery.type,
		.discovering = discovering,
	};

	bt_dev_dbg(hdev, "discovering %u", discovering);

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
10537
/* mgmt_suspending - send MGMT_EV_CONTROLLER_SUSPEND with the given
 * suspend state to management userspace.
 */
void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev = {
		.suspend_state = state,
	};

	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}
10545
/* mgmt_resuming - send MGMT_EV_CONTROLLER_RESUME with the wake reason
 * and, when available, the address of the device that woke us. A NULL
 * @bdaddr results in an all-zero address in the event.
 */
void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	/* Start from a zeroed address and fill it in only when known */
	memset(&ev.addr, 0, sizeof(ev.addr));
	ev.wake_reason = reason;

	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}
10561
/* Registration descriptor for the HCI control channel: maps the
 * mgmt_handlers table onto HCI_CHANNEL_CONTROL and hooks per-device
 * initialization via mgmt_init_hdev.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
10568
/* Register the management control channel with the HCI core.
 * Returns 0 on success or a negative error from the core.
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
10573
/* Unregister the management control channel on module teardown. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
10578
mgmt_cleanup(struct sock * sk)10579 void mgmt_cleanup(struct sock *sk)
10580 {
10581 struct mgmt_mesh_tx *mesh_tx;
10582 struct hci_dev *hdev;
10583
10584 read_lock(&hci_dev_list_lock);
10585
10586 list_for_each_entry(hdev, &hci_dev_list, list) {
10587 do {
10588 mesh_tx = mgmt_mesh_next(hdev, sk);
10589
10590 if (mesh_tx)
10591 mesh_send_complete(hdev, mesh_tx, true);
10592 } while (mesh_tx);
10593 }
10594
10595 read_unlock(&hci_dev_list_lock);
10596 }
10597