/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <linux/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
#include "aosp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	23

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
	MGMT_OP_HCI_CMD_SYNC,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};

static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};

#define CACHE_TIMEOUT	secs_to_jiffies(2)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

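/* Map negative errno values onto MGMT status codes. Anything without an
 * explicit mapping falls back to MGMT_STATUS_FAILED.
 */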
static u8 mgmt_errno_status(int err)
{
	switch (err) {
	case 0:
		return MGMT_STATUS_SUCCESS;
	case -EPERM:
		return MGMT_STATUS_REJECTED;
	case -EINVAL:
		return MGMT_STATUS_INVALID_PARAMS;
	case -EOPNOTSUPP:
		return MGMT_STATUS_NOT_SUPPORTED;
	case -EBUSY:
		return MGMT_STATUS_BUSY;
	case -ETIMEDOUT:
		return MGMT_STATUS_AUTH_FAILED;
	case -ENOMEM:
		return MGMT_STATUS_NO_RESOURCES;
	case -EISCONN:
		return MGMT_STATUS_ALREADY_CONNECTED;
	case -ENOTCONN:
		return MGMT_STATUS_DISCONNECTED;
	}

	return MGMT_STATUS_FAILED;
}

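/* Translate a mixed error value into a MGMT status: negative values are
 * kernel errno codes, non-negative values are HCI status codes that get
 * looked up in mgmt_status_table above.
 */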
static u8 mgmt_status(int err)
{
	if (err < 0)
		return mgmt_errno_status(err);

	if (err < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[err];

	return MGMT_STATUS_FAILED;
}

static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}

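/* Convert the address type used on the MGMT interface (BDADDR_LE_*) into
 * the LE address type used by the core stack (ADDR_LE_DEV_*).
 */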
static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;
	else
		return ADDR_LE_DEV_RANDOM;
}

void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}

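/* Build the Read Management Supported Commands reply: the opcodes array
 * carries the command list followed by the event list, with untrusted
 * sockets only seeing the read-only subset.
 */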
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}

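/* List all configured controller indexes. The reply is sized in a first
 * walk of hci_dev_list and filled in a second one, both under
 * hci_dev_list_lock, which is why the allocation has to use GFP_ATOMIC.
 */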
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}

static bool is_configured(struct hci_dev *hdev)
{
	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if ((hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) ||
	     hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

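/* Return the options that still have to be set before the controller can
 * be considered configured, as a little-endian MGMT_OPTION_* bitmask.
 */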
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) ||
	     hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}

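/* Derive the supported MGMT_PHY_* bits from the controller's BR/EDR LMP
 * feature bits and LE feature bits.
 */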
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}

static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}

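/* All supported PHYs except the mandatory ones (BR 1M 1-slot and LE 1M
 * TX/RX) are configurable.
 */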
static u32 get_configurable_phys(struct hci_dev *hdev)
{
	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}

static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev))
			settings |= MGMT_SETTING_SSP;

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (hci_test_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) || hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (ll_privacy_capable(hdev))
		settings |= MGMT_SETTING_LL_PRIVACY;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}

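/* Build the current settings bitmask from the hdev flags. This is the
 * value carried in settings replies and New Settings events.
 */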
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then whether
	 * the address is actually in use decides if the flag is set.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_enabled(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_enabled(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_enabled(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_enabled(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	if (ll_privacy_enabled(hdev))
		settings |= MGMT_SETTING_LL_PRIVACY;

	return settings;
}

static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}

bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}

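/* While the service cache is active (HCI_SERVICE_CACHE), UUID changes are
 * held back; once the cache timeout fires, the EIR data and the class of
 * device are brought back in sync with the current state.
 */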
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}

static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}

static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}

static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data);

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);

static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}

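/* Runs when the mesh send window has ended: clear the sending flag, stop
 * advertising unless other instances still need it, and complete the
 * pending transmission so that mesh_next() can start the next queued one.
 */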
static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	if (list_empty(&hdev->adv_instances))
		hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}

static int mesh_send_sync(struct hci_dev *hdev, void *data);
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);

static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}

static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}

static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set implicitly
	 * so that pairing works for them, but for mgmt we require
	 * user-space to explicitly enable it.
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}

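/* Append the class of device, the appearance and the complete and short
 * local names to a buffer in EIR format and return the number of bytes
 * written.
 */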
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}

static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}

static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}

void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for the AUTO_OFF case where the device might
		 * not "really" have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}

static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_settings for power on as power off is
		 * deferred to hdev->power_off work which does call
		 * hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}

static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return -ECANCELED;

	cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}

static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!cp->val) {
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
					      MGMT_STATUS_BUSY);
			goto failed;
		}
	}

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, *status);
}

static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	/* Dequeue cmd_sync entries using cmd as data as that is about to be
	 * removed/freed.
	 */
	hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);

	if (cmd->cmd_complete) {
		cmd->cmd_complete(cmd, match->mgmt_status);
		return;
	}

	cmd_status_rsp(cmd, data);
}

static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}

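/* Commands that only make sense on BR/EDR capable and enabled controllers
 * use this helper to pick the appropriate failure status.
 */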
mgmt_bredr_support(struct hci_dev * hdev)1491 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1492 {
1493 if (!lmp_bredr_capable(hdev))
1494 return MGMT_STATUS_NOT_SUPPORTED;
1495 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1496 return MGMT_STATUS_REJECTED;
1497 else
1498 return MGMT_STATUS_SUCCESS;
1499 }
1500
mgmt_le_support(struct hci_dev * hdev)1501 static u8 mgmt_le_support(struct hci_dev *hdev)
1502 {
1503 if (!lmp_le_capable(hdev))
1504 return MGMT_STATUS_NOT_SUPPORTED;
1505 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1506 return MGMT_STATUS_REJECTED;
1507 else
1508 return MGMT_STATUS_SUCCESS;
1509 }
1510
mgmt_set_discoverable_complete(struct hci_dev * hdev,void * data,int err)1511 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1512 int err)
1513 {
1514 struct mgmt_pending_cmd *cmd = data;
1515
1516 bt_dev_dbg(hdev, "err %d", err);
1517
1518 /* Make sure cmd still outstanding. */
1519 if (err == -ECANCELED ||
1520 cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1521 return;
1522
1523 hci_dev_lock(hdev);
1524
1525 if (err) {
1526 u8 mgmt_err = mgmt_status(err);
1527 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
1528 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1529 goto done;
1530 }
1531
1532 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1533 hdev->discov_timeout > 0) {
1534 int to = secs_to_jiffies(hdev->discov_timeout);
1535 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1536 }
1537
1538 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1539 new_settings(hdev, cmd->sk);
1540
1541 done:
1542 mgmt_pending_remove(cmd);
1543 hci_dev_unlock(hdev);
1544 }
1545
set_discoverable_sync(struct hci_dev * hdev,void * data)1546 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1547 {
1548 BT_DBG("%s", hdev->name);
1549
1550 return hci_update_discoverable_sync(hdev);
1551 }
1552
set_discoverable(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)1553 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1554 u16 len)
1555 {
1556 struct mgmt_cp_set_discoverable *cp = data;
1557 struct mgmt_pending_cmd *cmd;
1558 u16 timeout;
1559 int err;
1560
1561 bt_dev_dbg(hdev, "sock %p", sk);
1562
1563 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1564 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1565 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1566 MGMT_STATUS_REJECTED);
1567
1568 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1569 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1570 MGMT_STATUS_INVALID_PARAMS);
1571
1572 timeout = __le16_to_cpu(cp->timeout);
1573
1574 /* Disabling discoverable requires that no timeout is set,
1575 * and enabling limited discoverable requires a timeout.
1576 */
1577 if ((cp->val == 0x00 && timeout > 0) ||
1578 (cp->val == 0x02 && timeout == 0))
1579 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1580 MGMT_STATUS_INVALID_PARAMS);
1581
1582 hci_dev_lock(hdev);
1583
1584 if (!hdev_is_powered(hdev) && timeout > 0) {
1585 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1586 MGMT_STATUS_NOT_POWERED);
1587 goto failed;
1588 }
1589
1590 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1591 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1592 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1593 MGMT_STATUS_BUSY);
1594 goto failed;
1595 }
1596
1597 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1598 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1599 MGMT_STATUS_REJECTED);
1600 goto failed;
1601 }
1602
1603 if (hdev->advertising_paused) {
1604 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1605 MGMT_STATUS_BUSY);
1606 goto failed;
1607 }
1608
1609 if (!hdev_is_powered(hdev)) {
1610 bool changed = false;
1611
1612 /* Setting limited discoverable when powered off is
1613 * not a valid operation since it requires a timeout
1614 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1615 */
1616 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1617 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1618 changed = true;
1619 }
1620
1621 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1622 if (err < 0)
1623 goto failed;
1624
1625 if (changed)
1626 err = new_settings(hdev, sk);
1627
1628 goto failed;
1629 }
1630
1631 /* If the current mode is the same, then just update the timeout
1632 * value with the new value. And if only the timeout gets updated,
1633 * then no need for any HCI transactions.
1634 */
1635 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1636 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1637 HCI_LIMITED_DISCOVERABLE)) {
1638 cancel_delayed_work(&hdev->discov_off);
1639 hdev->discov_timeout = timeout;
1640
1641 if (cp->val && hdev->discov_timeout > 0) {
1642 int to = secs_to_jiffies(hdev->discov_timeout);
1643 queue_delayed_work(hdev->req_workqueue,
1644 &hdev->discov_off, to);
1645 }
1646
1647 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1648 goto failed;
1649 }
1650
1651 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1652 if (!cmd) {
1653 err = -ENOMEM;
1654 goto failed;
1655 }
1656
1657 /* Cancel any potential discoverable timeout that might be
1658 * still active and store new timeout value. The arming of
1659 * the timeout happens in the complete handler.
1660 */
1661 cancel_delayed_work(&hdev->discov_off);
1662 hdev->discov_timeout = timeout;
1663
1664 if (cp->val)
1665 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1666 else
1667 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1668
1669 /* Limited discoverable mode */
1670 if (cp->val == 0x02)
1671 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1672 else
1673 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1674
1675 err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1676 mgmt_set_discoverable_complete);
1677
1678 if (err < 0)
1679 mgmt_pending_remove(cmd);
1680
1681 failed:
1682 hci_dev_unlock(hdev);
1683 return err;
1684 }
1685
mgmt_set_connectable_complete(struct hci_dev * hdev,void * data,int err)1686 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1687 int err)
1688 {
1689 struct mgmt_pending_cmd *cmd = data;
1690
1691 bt_dev_dbg(hdev, "err %d", err);
1692
1693 /* Make sure cmd still outstanding. */
1694 if (err == -ECANCELED ||
1695 cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1696 return;
1697
1698 hci_dev_lock(hdev);
1699
1700 if (err) {
1701 u8 mgmt_err = mgmt_status(err);
1702 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
1703 goto done;
1704 }
1705
1706 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1707 new_settings(hdev, cmd->sk);
1708
1709 done:
1710 mgmt_pending_remove(cmd);
1711
1712 hci_dev_unlock(hdev);
1713 }
1714
set_connectable_update_settings(struct hci_dev * hdev,struct sock * sk,u8 val)1715 static int set_connectable_update_settings(struct hci_dev *hdev,
1716 struct sock *sk, u8 val)
1717 {
1718 bool changed = false;
1719 int err;
1720
1721 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1722 changed = true;
1723
1724 if (val) {
1725 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1726 } else {
1727 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1728 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1729 }
1730
1731 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1732 if (err < 0)
1733 return err;
1734
1735 if (changed) {
1736 hci_update_scan(hdev);
1737 hci_update_passive_scan(hdev);
1738 return new_settings(hdev, sk);
1739 }
1740
1741 return 0;
1742 }
1743
set_connectable_sync(struct hci_dev * hdev,void * data)1744 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1745 {
1746 BT_DBG("%s", hdev->name);
1747
1748 return hci_update_connectable_sync(hdev);
1749 }
1750
set_connectable(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)1751 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1752 u16 len)
1753 {
1754 struct mgmt_mode *cp = data;
1755 struct mgmt_pending_cmd *cmd;
1756 int err;
1757
1758 bt_dev_dbg(hdev, "sock %p", sk);
1759
1760 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1761 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1762 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1763 MGMT_STATUS_REJECTED);
1764
1765 if (cp->val != 0x00 && cp->val != 0x01)
1766 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1767 MGMT_STATUS_INVALID_PARAMS);
1768
1769 hci_dev_lock(hdev);
1770
1771 if (!hdev_is_powered(hdev)) {
1772 err = set_connectable_update_settings(hdev, sk, cp->val);
1773 goto failed;
1774 }
1775
1776 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1777 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1778 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1779 MGMT_STATUS_BUSY);
1780 goto failed;
1781 }
1782
1783 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1784 if (!cmd) {
1785 err = -ENOMEM;
1786 goto failed;
1787 }
1788
1789 if (cp->val) {
1790 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1791 } else {
1792 if (hdev->discov_timeout > 0)
1793 cancel_delayed_work(&hdev->discov_off);
1794
1795 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1796 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1797 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1798 }
1799
1800 err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1801 mgmt_set_connectable_complete);
1802
1803 if (err < 0)
1804 mgmt_pending_remove(cmd);
1805
1806 failed:
1807 hci_dev_unlock(hdev);
1808 return err;
1809 }
1810
set_bondable(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)1811 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1812 u16 len)
1813 {
1814 struct mgmt_mode *cp = data;
1815 bool changed;
1816 int err;
1817
1818 bt_dev_dbg(hdev, "sock %p", sk);
1819
1820 if (cp->val != 0x00 && cp->val != 0x01)
1821 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1822 MGMT_STATUS_INVALID_PARAMS);
1823
1824 hci_dev_lock(hdev);
1825
1826 if (cp->val)
1827 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1828 else
1829 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1830
1831 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1832 if (err < 0)
1833 goto unlock;
1834
1835 if (changed) {
1836 /* In limited privacy mode the change of bondable mode
1837 * may affect the local advertising address.
1838 */
1839 hci_update_discoverable(hdev);
1840
1841 err = new_settings(hdev, sk);
1842 }
1843
1844 unlock:
1845 hci_dev_unlock(hdev);
1846 return err;
1847 }
1848
1849 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1850 u16 len)
1851 {
1852 struct mgmt_mode *cp = data;
1853 struct mgmt_pending_cmd *cmd;
1854 u8 val, status;
1855 int err;
1856
1857 bt_dev_dbg(hdev, "sock %p", sk);
1858
1859 status = mgmt_bredr_support(hdev);
1860 if (status)
1861 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1862 status);
1863
1864 if (cp->val != 0x00 && cp->val != 0x01)
1865 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1866 MGMT_STATUS_INVALID_PARAMS);
1867
1868 hci_dev_lock(hdev);
1869
1870 if (!hdev_is_powered(hdev)) {
1871 bool changed = false;
1872
1873 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1874 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1875 changed = true;
1876 }
1877
1878 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1879 if (err < 0)
1880 goto failed;
1881
1882 if (changed)
1883 err = new_settings(hdev, sk);
1884
1885 goto failed;
1886 }
1887
1888 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1889 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1890 MGMT_STATUS_BUSY);
1891 goto failed;
1892 }
1893
1894 val = !!cp->val;
1895
1896 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1897 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1898 goto failed;
1899 }
1900
1901 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1902 if (!cmd) {
1903 err = -ENOMEM;
1904 goto failed;
1905 }
1906
1907 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1908 if (err < 0) {
1909 mgmt_pending_remove(cmd);
1910 goto failed;
1911 }
1912
1913 failed:
1914 hci_dev_unlock(hdev);
1915 return err;
1916 }
1917
1918 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1919 {
1920 struct cmd_lookup match = { NULL, hdev };
1921 struct mgmt_pending_cmd *cmd = data;
1922 struct mgmt_mode *cp = cmd->param;
1923 u8 enable = cp->val;
1924 bool changed;
1925
1926 /* Make sure cmd still outstanding. */
1927 if (err == -ECANCELED || cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1928 return;
1929
1930 if (err) {
1931 u8 mgmt_err = mgmt_status(err);
1932
1933 if (enable && hci_dev_test_and_clear_flag(hdev,
1934 HCI_SSP_ENABLED)) {
1935 new_settings(hdev, NULL);
1936 }
1937
1938 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, true,
1939 cmd_status_rsp, &mgmt_err);
1940 return;
1941 }
1942
1943 if (enable) {
1944 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1945 } else {
1946 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1947 }
1948
1949 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, true, settings_rsp, &match);
1950
1951 if (changed)
1952 new_settings(hdev, match.sk);
1953
1954 if (match.sk)
1955 sock_put(match.sk);
1956
1957 hci_update_eir_sync(hdev);
1958 }
1959
1960 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1961 {
1962 struct mgmt_pending_cmd *cmd = data;
1963 struct mgmt_mode *cp = cmd->param;
1964 bool changed = false;
1965 int err;
1966
1967 if (cp->val)
1968 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1969
1970 err = hci_write_ssp_mode_sync(hdev, cp->val);
1971
1972 if (!err && changed)
1973 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1974
1975 return err;
1976 }
1977
1978 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1979 {
1980 struct mgmt_mode *cp = data;
1981 struct mgmt_pending_cmd *cmd;
1982 u8 status;
1983 int err;
1984
1985 bt_dev_dbg(hdev, "sock %p", sk);
1986
1987 status = mgmt_bredr_support(hdev);
1988 if (status)
1989 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1990
1991 if (!lmp_ssp_capable(hdev))
1992 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1993 MGMT_STATUS_NOT_SUPPORTED);
1994
1995 if (cp->val != 0x00 && cp->val != 0x01)
1996 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1997 MGMT_STATUS_INVALID_PARAMS);
1998
1999 hci_dev_lock(hdev);
2000
2001 if (!hdev_is_powered(hdev)) {
2002 bool changed;
2003
2004 if (cp->val) {
2005 changed = !hci_dev_test_and_set_flag(hdev,
2006 HCI_SSP_ENABLED);
2007 } else {
2008 changed = hci_dev_test_and_clear_flag(hdev,
2009 HCI_SSP_ENABLED);
2010 }
2011
2012 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2013 if (err < 0)
2014 goto failed;
2015
2016 if (changed)
2017 err = new_settings(hdev, sk);
2018
2019 goto failed;
2020 }
2021
2022 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2023 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2024 MGMT_STATUS_BUSY);
2025 goto failed;
2026 }
2027
2028 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2029 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2030 goto failed;
2031 }
2032
2033 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2034 if (!cmd)
2035 err = -ENOMEM;
2036 else
2037 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2038 set_ssp_complete);
2039
2040 if (err < 0) {
2041 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2042 MGMT_STATUS_FAILED);
2043
2044 if (cmd)
2045 mgmt_pending_remove(cmd);
2046 }
2047
2048 failed:
2049 hci_dev_unlock(hdev);
2050 return err;
2051 }
2052
2053 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2054 {
2055 bt_dev_dbg(hdev, "sock %p", sk);
2056
2057 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2058 MGMT_STATUS_NOT_SUPPORTED);
2059 }
2060
2061 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2062 {
2063 struct cmd_lookup match = { NULL, hdev };
2064 u8 status = mgmt_status(err);
2065
2066 bt_dev_dbg(hdev, "err %d", err);
2067
2068 if (status) {
2069 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, true, cmd_status_rsp,
2070 &status);
2071 return;
2072 }
2073
2074 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, true, settings_rsp, &match);
2075
2076 new_settings(hdev, match.sk);
2077
2078 if (match.sk)
2079 sock_put(match.sk);
2080 }
2081
2082 static int set_le_sync(struct hci_dev *hdev, void *data)
2083 {
2084 struct mgmt_pending_cmd *cmd = data;
2085 struct mgmt_mode *cp = cmd->param;
2086 u8 val = !!cp->val;
2087 int err;
2088
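/* Added comment (not in the original source): when LE is being disabled,
 * advertising is torn down first — stored instances are cleared, any
 * active advertising is stopped and, on controllers with extended
 * advertising support, the adv set is removed as well.
 */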
2089 if (!val) {
2090 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2091
2092 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2093 hci_disable_advertising_sync(hdev);
2094
2095 if (ext_adv_capable(hdev))
2096 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2097 } else {
2098 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2099 }
2100
2101 err = hci_write_le_host_supported_sync(hdev, val, 0);
2102
2103 /* Make sure the controller has a good default for
2104 * advertising data. Restrict the update to when LE
2105 * has actually been enabled. During power on, the
2106 * update in powered_update_hci will take care of it.
2107 */
2108 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2109 if (ext_adv_capable(hdev)) {
2110 int status;
2111
2112 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2113 if (!status)
2114 hci_update_scan_rsp_data_sync(hdev, 0x00);
2115 } else {
2116 hci_update_adv_data_sync(hdev, 0x00);
2117 hci_update_scan_rsp_data_sync(hdev, 0x00);
2118 }
2119
2120 hci_update_passive_scan(hdev);
2121 }
2122
2123 return err;
2124 }
2125
2126 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2127 {
2128 struct mgmt_pending_cmd *cmd = data;
2129 u8 status = mgmt_status(err);
2130 struct sock *sk = cmd->sk;
2131
2132 if (status) {
2133 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, true,
2134 cmd_status_rsp, &status);
2135 return;
2136 }
2137
2138 mgmt_pending_remove(cmd);
2139 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2140 }
2141
2142 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2143 {
2144 struct mgmt_pending_cmd *cmd = data;
2145 struct mgmt_cp_set_mesh *cp = cmd->param;
2146 size_t len = cmd->param_len;
2147
2148 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2149
2150 if (cp->enable)
2151 hci_dev_set_flag(hdev, HCI_MESH);
2152 else
2153 hci_dev_clear_flag(hdev, HCI_MESH);
2154
2155 hdev->le_scan_interval = __le16_to_cpu(cp->period);
2156 hdev->le_scan_window = __le16_to_cpu(cp->window);
2157
2158 len -= sizeof(*cp);
2159
2160 /* If the filters don't fit, leave them cleared so all adv pkts are forwarded */
2161 if (len <= sizeof(hdev->mesh_ad_types))
2162 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2163
2164 hci_update_passive_scan_sync(hdev);
2165 return 0;
2166 }
2167
2168 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2169 {
2170 struct mgmt_cp_set_mesh *cp = data;
2171 struct mgmt_pending_cmd *cmd;
2172 __u16 period, window;
2173 int err = 0;
2174
2175 bt_dev_dbg(hdev, "sock %p", sk);
2176
2177 if (!lmp_le_capable(hdev) ||
2178 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2179 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2180 MGMT_STATUS_NOT_SUPPORTED);
2181
2182 if (cp->enable != 0x00 && cp->enable != 0x01)
2183 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2184 MGMT_STATUS_INVALID_PARAMS);
2185
2186 /* Keep allowed ranges in sync with set_scan_params() */
2187 period = __le16_to_cpu(cp->period);
2188
2189 if (period < 0x0004 || period > 0x4000)
2190 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2191 MGMT_STATUS_INVALID_PARAMS);
2192
2193 window = __le16_to_cpu(cp->window);
2194
2195 if (window < 0x0004 || window > 0x4000)
2196 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2197 MGMT_STATUS_INVALID_PARAMS);
2198
2199 if (window > period)
2200 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2201 MGMT_STATUS_INVALID_PARAMS);
2202
2203 hci_dev_lock(hdev);
2204
2205 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2206 if (!cmd)
2207 err = -ENOMEM;
2208 else
2209 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2210 set_mesh_complete);
2211
2212 if (err < 0) {
2213 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2214 MGMT_STATUS_FAILED);
2215
2216 if (cmd)
2217 mgmt_pending_remove(cmd);
2218 }
2219
2220 hci_dev_unlock(hdev);
2221 return err;
2222 }
2223
2224 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2225 {
2226 struct mgmt_mesh_tx *mesh_tx = data;
2227 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2228 unsigned long mesh_send_interval;
2229 u8 mgmt_err = mgmt_status(err);
2230
2231 /* Report any errors here, but don't report completion */
2232
2233 if (mgmt_err) {
2234 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2235 /* Send Complete Error Code for handle */
2236 mesh_send_complete(hdev, mesh_tx, false);
2237 return;
2238 }
2239
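/* Added comment (not in the original source): allow roughly 25 ms per
 * requested transmission (send->cnt) before the send-done work runs.
 */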
2240 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2241 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2242 mesh_send_interval);
2243 }
2244
2245 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2246 {
2247 struct mgmt_mesh_tx *mesh_tx = data;
2248 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2249 struct adv_info *adv, *next_instance;
2250 u8 instance = hdev->le_num_of_adv_sets + 1;
2251 u16 timeout, duration;
2252 int err = 0;
2253
2254 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2255 return MGMT_STATUS_BUSY;
2256
2257 timeout = 1000;
2258 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2259 adv = hci_add_adv_instance(hdev, instance, 0,
2260 send->adv_data_len, send->adv_data,
2261 0, NULL,
2262 timeout, duration,
2263 HCI_ADV_TX_POWER_NO_PREFERENCE,
2264 hdev->le_adv_min_interval,
2265 hdev->le_adv_max_interval,
2266 mesh_tx->handle);
2267
2268 if (!IS_ERR(adv))
2269 mesh_tx->instance = instance;
2270 else
2271 err = PTR_ERR(adv);
2272
2273 if (hdev->cur_adv_instance == instance) {
2274 /* If the currently advertised instance is being changed then
2275 * cancel the current advertising and schedule the next
2276 * instance. If there is only one instance then the overridden
2277 * advertising data will be visible right away.
2278 */
2279 cancel_adv_timeout(hdev);
2280
2281 next_instance = hci_get_next_instance(hdev, instance);
2282 if (next_instance)
2283 instance = next_instance->instance;
2284 else
2285 instance = 0;
2286 } else if (hdev->adv_instance_timeout) {
2287 /* Immediately advertise the new instance if no other is active, or
2288 * let it come up naturally from the queue if advertising is running
2289 */
2290 instance = 0;
2291 }
2292
2293 if (instance)
2294 return hci_schedule_adv_instance_sync(hdev, instance, true);
2295
2296 return err;
2297 }
2298
2299 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2300 {
2301 struct mgmt_rp_mesh_read_features *rp = data;
2302
2303 if (rp->used_handles >= rp->max_handles)
2304 return;
2305
2306 rp->handles[rp->used_handles++] = mesh_tx->handle;
2307 }
2308
2309 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2310 void *data, u16 len)
2311 {
2312 struct mgmt_rp_mesh_read_features rp;
2313
2314 if (!lmp_le_capable(hdev) ||
2315 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2316 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2317 MGMT_STATUS_NOT_SUPPORTED);
2318
2319 memset(&rp, 0, sizeof(rp));
2320 rp.index = cpu_to_le16(hdev->id);
2321 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2322 rp.max_handles = MESH_HANDLES_MAX;
2323
2324 hci_dev_lock(hdev);
2325
2326 if (rp.max_handles)
2327 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2328
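/* Added comment (not in the original source): trim the response to the
 * used handle slots only — sizeof(rp) accounts for all MESH_HANDLES_MAX
 * one-byte handles, so the unused ones are subtracted from the length.
 */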
2329 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2330 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2331
2332 hci_dev_unlock(hdev);
2333 return 0;
2334 }
2335
2336 static int send_cancel(struct hci_dev *hdev, void *data)
2337 {
2338 struct mgmt_pending_cmd *cmd = data;
2339 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2340 struct mgmt_mesh_tx *mesh_tx;
2341
2342 if (!cancel->handle) {
2343 do {
2344 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2345
2346 if (mesh_tx)
2347 mesh_send_complete(hdev, mesh_tx, false);
2348 } while (mesh_tx);
2349 } else {
2350 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2351
2352 if (mesh_tx && mesh_tx->sk == cmd->sk)
2353 mesh_send_complete(hdev, mesh_tx, false);
2354 }
2355
2356 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2357 0, NULL, 0);
2358 mgmt_pending_free(cmd);
2359
2360 return 0;
2361 }
2362
2363 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2364 void *data, u16 len)
2365 {
2366 struct mgmt_pending_cmd *cmd;
2367 int err;
2368
2369 if (!lmp_le_capable(hdev) ||
2370 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2371 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2372 MGMT_STATUS_NOT_SUPPORTED);
2373
2374 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2375 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2376 MGMT_STATUS_REJECTED);
2377
2378 hci_dev_lock(hdev);
2379 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2380 if (!cmd)
2381 err = -ENOMEM;
2382 else
2383 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2384
2385 if (err < 0) {
2386 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2387 MGMT_STATUS_FAILED);
2388
2389 if (cmd)
2390 mgmt_pending_free(cmd);
2391 }
2392
2393 hci_dev_unlock(hdev);
2394 return err;
2395 }
2396
2397 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2398 {
2399 struct mgmt_mesh_tx *mesh_tx;
2400 struct mgmt_cp_mesh_send *send = data;
2401 struct mgmt_rp_mesh_read_features rp;
2402 bool sending;
2403 int err = 0;
2404
2405 if (!lmp_le_capable(hdev) ||
2406 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2407 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2408 MGMT_STATUS_NOT_SUPPORTED);
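/* Added comment (not in the original source): the payload must carry at
 * least one byte of advertising data and at most 31 bytes after the
 * command header, 31 matching the legacy advertising data limit.
 */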
2409 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2410 len <= MGMT_MESH_SEND_SIZE ||
2411 len > (MGMT_MESH_SEND_SIZE + 31))
2412 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2413 MGMT_STATUS_REJECTED);
2414
2415 hci_dev_lock(hdev);
2416
2417 memset(&rp, 0, sizeof(rp));
2418 rp.max_handles = MESH_HANDLES_MAX;
2419
2420 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2421
2422 if (rp.max_handles <= rp.used_handles) {
2423 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2424 MGMT_STATUS_BUSY);
2425 goto done;
2426 }
2427
2428 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2429 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2430
2431 if (!mesh_tx)
2432 err = -ENOMEM;
2433 else if (!sending)
2434 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2435 mesh_send_start_complete);
2436
2437 if (err < 0) {
2438 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2439 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2440 MGMT_STATUS_FAILED);
2441
2442 if (mesh_tx) {
2443 if (sending)
2444 mgmt_mesh_remove(mesh_tx);
2445 }
2446 } else {
2447 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2448
2449 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2450 &mesh_tx->handle, 1);
2451 }
2452
2453 done:
2454 hci_dev_unlock(hdev);
2455 return err;
2456 }
2457
2458 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2459 {
2460 struct mgmt_mode *cp = data;
2461 struct mgmt_pending_cmd *cmd;
2462 int err;
2463 u8 val, enabled;
2464
2465 bt_dev_dbg(hdev, "sock %p", sk);
2466
2467 if (!lmp_le_capable(hdev))
2468 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2469 MGMT_STATUS_NOT_SUPPORTED);
2470
2471 if (cp->val != 0x00 && cp->val != 0x01)
2472 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2473 MGMT_STATUS_INVALID_PARAMS);
2474
2475 /* Bluetooth single-mode LE-only controllers, or dual-mode
2476 * controllers configured as LE-only devices, do not allow
2477 * switching LE off. These have either LE enabled explicitly
2478 * or BR/EDR has been previously switched off.
2479 *
2480 * When trying to enable an already enabled LE, gracefully send
2481 * a positive response. Trying to disable it, however, will
2482 * result in rejection.
2483 */
2484 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2485 if (cp->val == 0x01)
2486 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2487
2488 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2489 MGMT_STATUS_REJECTED);
2490 }
2491
2492 hci_dev_lock(hdev);
2493
2494 val = !!cp->val;
2495 enabled = lmp_host_le_capable(hdev);
2496
2497 if (!hdev_is_powered(hdev) || val == enabled) {
2498 bool changed = false;
2499
2500 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2501 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2502 changed = true;
2503 }
2504
2505 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2506 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2507 changed = true;
2508 }
2509
2510 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2511 if (err < 0)
2512 goto unlock;
2513
2514 if (changed)
2515 err = new_settings(hdev, sk);
2516
2517 goto unlock;
2518 }
2519
2520 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2521 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2522 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2523 MGMT_STATUS_BUSY);
2524 goto unlock;
2525 }
2526
2527 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2528 if (!cmd)
2529 err = -ENOMEM;
2530 else
2531 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2532 set_le_complete);
2533
2534 if (err < 0) {
2535 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2536 MGMT_STATUS_FAILED);
2537
2538 if (cmd)
2539 mgmt_pending_remove(cmd);
2540 }
2541
2542 unlock:
2543 hci_dev_unlock(hdev);
2544 return err;
2545 }
2546
2547 static int send_hci_cmd_sync(struct hci_dev *hdev, void *data)
2548 {
2549 struct mgmt_pending_cmd *cmd = data;
2550 struct mgmt_cp_hci_cmd_sync *cp = cmd->param;
2551 struct sk_buff *skb;
2552
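/* Added comment (not in the original source): forward the raw HCI
 * command, waiting for the caller-specified event; use the caller's
 * timeout (in seconds) when given, otherwise fall back to the default
 * HCI command timeout.
 */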
2553 skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cp->opcode),
2554 le16_to_cpu(cp->params_len), cp->params,
2555 cp->event, cp->timeout ?
2556 secs_to_jiffies(cp->timeout) :
2557 HCI_CMD_TIMEOUT);
2558 if (IS_ERR(skb)) {
2559 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2560 mgmt_status(PTR_ERR(skb)));
2561 goto done;
2562 }
2563
2564 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC, 0,
2565 skb->data, skb->len);
2566
2567 kfree_skb(skb);
2568
2569 done:
2570 mgmt_pending_free(cmd);
2571
2572 return 0;
2573 }
2574
2575 static int mgmt_hci_cmd_sync(struct sock *sk, struct hci_dev *hdev,
2576 void *data, u16 len)
2577 {
2578 struct mgmt_cp_hci_cmd_sync *cp = data;
2579 struct mgmt_pending_cmd *cmd;
2580 int err;
2581
2582 if (len != (offsetof(struct mgmt_cp_hci_cmd_sync, params) +
2583 le16_to_cpu(cp->params_len)))
2584 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2585 MGMT_STATUS_INVALID_PARAMS);
2586
2587 hci_dev_lock(hdev);
2588 cmd = mgmt_pending_new(sk, MGMT_OP_HCI_CMD_SYNC, hdev, data, len);
2589 if (!cmd)
2590 err = -ENOMEM;
2591 else
2592 err = hci_cmd_sync_queue(hdev, send_hci_cmd_sync, cmd, NULL);
2593
2594 if (err < 0) {
2595 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2596 MGMT_STATUS_FAILED);
2597
2598 if (cmd)
2599 mgmt_pending_free(cmd);
2600 }
2601
2602 hci_dev_unlock(hdev);
2603 return err;
2604 }
2605
2606 /* This is a helper function to test for pending mgmt commands that can
2607 * cause CoD or EIR HCI commands. We can only allow one such pending
2608 * mgmt command at a time since otherwise we cannot easily track what
2609 * the current values are, will be, and based on that calculate if a new
2610 * HCI command needs to be sent and if yes with what value.
2611 */
2612 static bool pending_eir_or_class(struct hci_dev *hdev)
2613 {
2614 struct mgmt_pending_cmd *cmd;
2615
2616 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2617 switch (cmd->opcode) {
2618 case MGMT_OP_ADD_UUID:
2619 case MGMT_OP_REMOVE_UUID:
2620 case MGMT_OP_SET_DEV_CLASS:
2621 case MGMT_OP_SET_POWERED:
2622 return true;
2623 }
2624 }
2625
2626 return false;
2627 }
2628
2629 static const u8 bluetooth_base_uuid[] = {
2630 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2631 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2632 };
2633
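/* Added comment (not in the original source): UUIDs are stored
 * little-endian; the Bluetooth Base UUID is
 * 00000000-0000-1000-8000-00805F9B34FB, so e.g. the 16-bit UUID 0x180d
 * differs from the base only in bytes 12-15. get_uuid_size() below
 * relies on exactly that layout.
 */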
2634 static u8 get_uuid_size(const u8 *uuid)
2635 {
2636 u32 val;
2637
2638 if (memcmp(uuid, bluetooth_base_uuid, 12))
2639 return 128;
2640
2641 val = get_unaligned_le32(&uuid[12]);
2642 if (val > 0xffff)
2643 return 32;
2644
2645 return 16;
2646 }
2647
2648 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2649 {
2650 struct mgmt_pending_cmd *cmd = data;
2651
2652 bt_dev_dbg(hdev, "err %d", err);
2653
2654 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
2655 mgmt_status(err), hdev->dev_class, 3);
2656
2657 mgmt_pending_free(cmd);
2658 }
2659
2660 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2661 {
2662 int err;
2663
2664 err = hci_update_class_sync(hdev);
2665 if (err)
2666 return err;
2667
2668 return hci_update_eir_sync(hdev);
2669 }
2670
2671 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2672 {
2673 struct mgmt_cp_add_uuid *cp = data;
2674 struct mgmt_pending_cmd *cmd;
2675 struct bt_uuid *uuid;
2676 int err;
2677
2678 bt_dev_dbg(hdev, "sock %p", sk);
2679
2680 hci_dev_lock(hdev);
2681
2682 if (pending_eir_or_class(hdev)) {
2683 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2684 MGMT_STATUS_BUSY);
2685 goto failed;
2686 }
2687
2688 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2689 if (!uuid) {
2690 err = -ENOMEM;
2691 goto failed;
2692 }
2693
2694 memcpy(uuid->uuid, cp->uuid, 16);
2695 uuid->svc_hint = cp->svc_hint;
2696 uuid->size = get_uuid_size(cp->uuid);
2697
2698 list_add_tail(&uuid->list, &hdev->uuids);
2699
2700 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2701 if (!cmd) {
2702 err = -ENOMEM;
2703 goto failed;
2704 }
2705
2706 /* MGMT_OP_ADD_UUID doesn't require the adapter to be up/running, so
2707 * use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2708 */
2709 err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
2710 mgmt_class_complete);
2711 if (err < 0) {
2712 mgmt_pending_free(cmd);
2713 goto failed;
2714 }
2715
2716 failed:
2717 hci_dev_unlock(hdev);
2718 return err;
2719 }
2720
2721 static bool enable_service_cache(struct hci_dev *hdev)
2722 {
2723 if (!hdev_is_powered(hdev))
2724 return false;
2725
2726 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2727 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2728 CACHE_TIMEOUT);
2729 return true;
2730 }
2731
2732 return false;
2733 }
2734
2735 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2736 {
2737 int err;
2738
2739 err = hci_update_class_sync(hdev);
2740 if (err)
2741 return err;
2742
2743 return hci_update_eir_sync(hdev);
2744 }
2745
2746 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2747 u16 len)
2748 {
2749 struct mgmt_cp_remove_uuid *cp = data;
2750 struct mgmt_pending_cmd *cmd;
2751 struct bt_uuid *match, *tmp;
2752 static const u8 bt_uuid_any[] = {
2753 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2754 };
2755 int err, found;
2756
2757 bt_dev_dbg(hdev, "sock %p", sk);
2758
2759 hci_dev_lock(hdev);
2760
2761 if (pending_eir_or_class(hdev)) {
2762 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2763 MGMT_STATUS_BUSY);
2764 goto unlock;
2765 }
2766
2767 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2768 hci_uuids_clear(hdev);
2769
2770 if (enable_service_cache(hdev)) {
2771 err = mgmt_cmd_complete(sk, hdev->id,
2772 MGMT_OP_REMOVE_UUID,
2773 0, hdev->dev_class, 3);
2774 goto unlock;
2775 }
2776
2777 goto update_class;
2778 }
2779
2780 found = 0;
2781
2782 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2783 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2784 continue;
2785
2786 list_del(&match->list);
2787 kfree(match);
2788 found++;
2789 }
2790
2791 if (found == 0) {
2792 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2793 MGMT_STATUS_INVALID_PARAMS);
2794 goto unlock;
2795 }
2796
2797 update_class:
2798 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2799 if (!cmd) {
2800 err = -ENOMEM;
2801 goto unlock;
2802 }
2803
2804 /* MGMT_OP_REMOVE_UUID doesn't require the adapter to be up/running, so
2805 * use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2806 */
2807 err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
2808 mgmt_class_complete);
2809 if (err < 0)
2810 mgmt_pending_free(cmd);
2811
2812 unlock:
2813 hci_dev_unlock(hdev);
2814 return err;
2815 }
2816
2817 static int set_class_sync(struct hci_dev *hdev, void *data)
2818 {
2819 int err = 0;
2820
2821 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2822 cancel_delayed_work_sync(&hdev->service_cache);
2823 err = hci_update_eir_sync(hdev);
2824 }
2825
2826 if (err)
2827 return err;
2828
2829 return hci_update_class_sync(hdev);
2830 }
2831
2832 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2833 u16 len)
2834 {
2835 struct mgmt_cp_set_dev_class *cp = data;
2836 struct mgmt_pending_cmd *cmd;
2837 int err;
2838
2839 bt_dev_dbg(hdev, "sock %p", sk);
2840
2841 if (!lmp_bredr_capable(hdev))
2842 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2843 MGMT_STATUS_NOT_SUPPORTED);
2844
2845 hci_dev_lock(hdev);
2846
2847 if (pending_eir_or_class(hdev)) {
2848 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2849 MGMT_STATUS_BUSY);
2850 goto unlock;
2851 }
2852
2853 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2854 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2855 MGMT_STATUS_INVALID_PARAMS);
2856 goto unlock;
2857 }
2858
2859 hdev->major_class = cp->major;
2860 hdev->minor_class = cp->minor;
2861
2862 if (!hdev_is_powered(hdev)) {
2863 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2864 hdev->dev_class, 3);
2865 goto unlock;
2866 }
2867
2868 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2869 if (!cmd) {
2870 err = -ENOMEM;
2871 goto unlock;
2872 }
2873
2874 /* MGMT_OP_SET_DEV_CLASS doesn't require the adapter to be up/running, so
2875 * use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2876 */
2877 err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
2878 mgmt_class_complete);
2879 if (err < 0)
2880 mgmt_pending_free(cmd);
2881
2882 unlock:
2883 hci_dev_unlock(hdev);
2884 return err;
2885 }
2886
2887 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2888 u16 len)
2889 {
2890 struct mgmt_cp_load_link_keys *cp = data;
2891 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2892 sizeof(struct mgmt_link_key_info));
2893 u16 key_count, expected_len;
2894 bool changed;
2895 int i;
2896
2897 bt_dev_dbg(hdev, "sock %p", sk);
2898
2899 if (!lmp_bredr_capable(hdev))
2900 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2901 MGMT_STATUS_NOT_SUPPORTED);
2902
2903 key_count = __le16_to_cpu(cp->key_count);
2904 if (key_count > max_key_count) {
2905 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2906 key_count);
2907 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2908 MGMT_STATUS_INVALID_PARAMS);
2909 }
2910
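/* Added comment (not in the original source): struct_size() expands to
 * sizeof(*cp) + key_count * sizeof(cp->keys[0]) with overflow protection;
 * the max_key_count check above keeps the result within the u16
 * expected_len.
 */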
2911 expected_len = struct_size(cp, keys, key_count);
2912 if (expected_len != len) {
2913 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2914 expected_len, len);
2915 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2916 MGMT_STATUS_INVALID_PARAMS);
2917 }
2918
2919 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2920 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2921 MGMT_STATUS_INVALID_PARAMS);
2922
2923 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2924 key_count);
2925
2926 hci_dev_lock(hdev);
2927
2928 hci_link_keys_clear(hdev);
2929
2930 if (cp->debug_keys)
2931 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2932 else
2933 changed = hci_dev_test_and_clear_flag(hdev,
2934 HCI_KEEP_DEBUG_KEYS);
2935
2936 if (changed)
2937 new_settings(hdev, NULL);
2938
2939 for (i = 0; i < key_count; i++) {
2940 struct mgmt_link_key_info *key = &cp->keys[i];
2941
2942 if (hci_is_blocked_key(hdev,
2943 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2944 key->val)) {
2945 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2946 &key->addr.bdaddr);
2947 continue;
2948 }
2949
2950 if (key->addr.type != BDADDR_BREDR) {
2951 bt_dev_warn(hdev,
2952 "Invalid link address type %u for %pMR",
2953 key->addr.type, &key->addr.bdaddr);
2954 continue;
2955 }
2956
2957 if (key->type > 0x08) {
2958 bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
2959 key->type, &key->addr.bdaddr);
2960 continue;
2961 }
2962
2963 /* Always ignore debug keys and require a new pairing if
2964 * the user wants to use them.
2965 */
2966 if (key->type == HCI_LK_DEBUG_COMBINATION)
2967 continue;
2968
2969 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2970 key->type, key->pin_len, NULL);
2971 }
2972
2973 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2974
2975 hci_dev_unlock(hdev);
2976
2977 return 0;
2978 }
2979
2980 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2981 u8 addr_type, struct sock *skip_sk)
2982 {
2983 struct mgmt_ev_device_unpaired ev;
2984
2985 bacpy(&ev.addr.bdaddr, bdaddr);
2986 ev.addr.type = addr_type;
2987
2988 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2989 skip_sk);
2990 }
2991
2992 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2993 {
2994 struct mgmt_pending_cmd *cmd = data;
2995 struct mgmt_cp_unpair_device *cp = cmd->param;
2996
2997 if (!err)
2998 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2999
3000 cmd->cmd_complete(cmd, err);
3001 mgmt_pending_free(cmd);
3002 }
3003
3004 static int unpair_device_sync(struct hci_dev *hdev, void *data)
3005 {
3006 struct mgmt_pending_cmd *cmd = data;
3007 struct mgmt_cp_unpair_device *cp = cmd->param;
3008 struct hci_conn *conn;
3009
3010 if (cp->addr.type == BDADDR_BREDR)
3011 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3012 &cp->addr.bdaddr);
3013 else
3014 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3015 le_addr_type(cp->addr.type));
3016
3017 if (!conn)
3018 return 0;
3019
3020 /* Disregard any possible error since the likes of hci_abort_conn_sync
3021 * will clean up the connection no matter the error.
3022 */
3023 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3024
3025 return 0;
3026 }
3027
3028 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3029 u16 len)
3030 {
3031 struct mgmt_cp_unpair_device *cp = data;
3032 struct mgmt_rp_unpair_device rp;
3033 struct hci_conn_params *params;
3034 struct mgmt_pending_cmd *cmd;
3035 struct hci_conn *conn;
3036 u8 addr_type;
3037 int err;
3038
3039 memset(&rp, 0, sizeof(rp));
3040 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3041 rp.addr.type = cp->addr.type;
3042
3043 if (!bdaddr_type_is_valid(cp->addr.type))
3044 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3045 MGMT_STATUS_INVALID_PARAMS,
3046 &rp, sizeof(rp));
3047
3048 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
3049 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3050 MGMT_STATUS_INVALID_PARAMS,
3051 &rp, sizeof(rp));
3052
3053 hci_dev_lock(hdev);
3054
3055 if (!hdev_is_powered(hdev)) {
3056 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3057 MGMT_STATUS_NOT_POWERED, &rp,
3058 sizeof(rp));
3059 goto unlock;
3060 }
3061
3062 if (cp->addr.type == BDADDR_BREDR) {
3063 /* If disconnection is requested, then look up the
3064 * connection. If the remote device is connected, it
3065 * will be later used to terminate the link.
3066 *
3067 * Setting it to NULL explicitly will cause no
3068 * termination of the link.
3069 */
3070 if (cp->disconnect)
3071 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3072 &cp->addr.bdaddr);
3073 else
3074 conn = NULL;
3075
3076 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3077 if (err < 0) {
3078 err = mgmt_cmd_complete(sk, hdev->id,
3079 MGMT_OP_UNPAIR_DEVICE,
3080 MGMT_STATUS_NOT_PAIRED, &rp,
3081 sizeof(rp));
3082 goto unlock;
3083 }
3084
3085 goto done;
3086 }
3087
3088 /* LE address type */
3089 addr_type = le_addr_type(cp->addr.type);
3090
3091 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3092 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3093 if (err < 0) {
3094 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3095 MGMT_STATUS_NOT_PAIRED, &rp,
3096 sizeof(rp));
3097 goto unlock;
3098 }
3099
3100 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3101 if (!conn) {
3102 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3103 goto done;
3104 }
3105
3106
3107 /* Defer clearing the connection parameters until the connection
3108 * closes, to give a chance of keeping them if re-pairing happens.
3109 */
3110 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3111
3112 /* Disable auto-connection parameters if present */
3113 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3114 if (params) {
3115 if (params->explicit_connect)
3116 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3117 else
3118 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3119 }
3120
3121 /* If disconnection is not requested, then clear the connection
3122 * variable so that the link is not terminated.
3123 */
3124 if (!cp->disconnect)
3125 conn = NULL;
3126
3127 done:
3128 /* If the connection variable is set, then termination of the
3129 * link is requested.
3130 */
3131 if (!conn) {
3132 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3133 &rp, sizeof(rp));
3134 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3135 goto unlock;
3136 }
3137
3138 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3139 sizeof(*cp));
3140 if (!cmd) {
3141 err = -ENOMEM;
3142 goto unlock;
3143 }
3144
3145 cmd->cmd_complete = addr_cmd_complete;
3146
3147 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3148 unpair_device_complete);
3149 if (err < 0)
3150 mgmt_pending_free(cmd);
3151
3152 unlock:
3153 hci_dev_unlock(hdev);
3154 return err;
3155 }
3156
3157 static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
3158 {
3159 struct mgmt_pending_cmd *cmd = data;
3160
3161 cmd->cmd_complete(cmd, mgmt_status(err));
3162 mgmt_pending_free(cmd);
3163 }
3164
3165 static int disconnect_sync(struct hci_dev *hdev, void *data)
3166 {
3167 struct mgmt_pending_cmd *cmd = data;
3168 struct mgmt_cp_disconnect *cp = cmd->param;
3169 struct hci_conn *conn;
3170
3171 if (cp->addr.type == BDADDR_BREDR)
3172 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3173 &cp->addr.bdaddr);
3174 else
3175 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3176 le_addr_type(cp->addr.type));
3177
3178 if (!conn)
3179 return -ENOTCONN;
3180
3181 /* Disregard any possible error since the likes of hci_abort_conn_sync
3182 * will clean up the connection no matter the error.
3183 */
3184 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3185
3186 return 0;
3187 }
3188
3189 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3190 u16 len)
3191 {
3192 struct mgmt_cp_disconnect *cp = data;
3193 struct mgmt_rp_disconnect rp;
3194 struct mgmt_pending_cmd *cmd;
3195 int err;
3196
3197 bt_dev_dbg(hdev, "sock %p", sk);
3198
3199 memset(&rp, 0, sizeof(rp));
3200 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3201 rp.addr.type = cp->addr.type;
3202
3203 if (!bdaddr_type_is_valid(cp->addr.type))
3204 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3205 MGMT_STATUS_INVALID_PARAMS,
3206 &rp, sizeof(rp));
3207
3208 hci_dev_lock(hdev);
3209
3210 if (!test_bit(HCI_UP, &hdev->flags)) {
3211 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3212 MGMT_STATUS_NOT_POWERED, &rp,
3213 sizeof(rp));
3214 goto failed;
3215 }
3216
3217 cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3218 if (!cmd) {
3219 err = -ENOMEM;
3220 goto failed;
3221 }
3222
3223 cmd->cmd_complete = generic_cmd_complete;
3224
3225 err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
3226 disconnect_complete);
3227 if (err < 0)
3228 mgmt_pending_free(cmd);
3229
3230 failed:
3231 hci_dev_unlock(hdev);
3232 return err;
3233 }
3234
3235 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3236 {
3237 switch (link_type) {
3238 case CIS_LINK:
3239 case BIS_LINK:
3240 case PA_LINK:
3241 case LE_LINK:
3242 switch (addr_type) {
3243 case ADDR_LE_DEV_PUBLIC:
3244 return BDADDR_LE_PUBLIC;
3245
3246 default:
3247 /* Fallback to LE Random address type */
3248 return BDADDR_LE_RANDOM;
3249 }
3250
3251 default:
3252 /* Fallback to BR/EDR type */
3253 return BDADDR_BREDR;
3254 }
3255 }
3256
3257 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3258 u16 data_len)
3259 {
3260 struct mgmt_rp_get_connections *rp;
3261 struct hci_conn *c;
3262 int err;
3263 u16 i;
3264
3265 bt_dev_dbg(hdev, "sock %p", sk);
3266
3267 hci_dev_lock(hdev);
3268
3269 if (!hdev_is_powered(hdev)) {
3270 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3271 MGMT_STATUS_NOT_POWERED);
3272 goto unlock;
3273 }
3274
3275 i = 0;
3276 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3277 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3278 i++;
3279 }
3280
3281 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3282 if (!rp) {
3283 err = -ENOMEM;
3284 goto unlock;
3285 }
3286
3287 i = 0;
3288 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3289 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3290 continue;
3291 bacpy(&rp->addr[i].bdaddr, &c->dst);
3292 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
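/* Added comment (not in the original source): SCO/eSCO links share the
 * ACL peer address and are not reported; skipping before the increment
 * lets the next connection overwrite this slot.
 */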
3293 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3294 continue;
3295 i++;
3296 }
3297
3298 rp->conn_count = cpu_to_le16(i);
3299
3300 /* Recalculate length in case of filtered SCO connections, etc */
3301 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3302 struct_size(rp, addr, i));
3303
3304 kfree(rp);
3305
3306 unlock:
3307 hci_dev_unlock(hdev);
3308 return err;
3309 }
3310
3311 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3312 struct mgmt_cp_pin_code_neg_reply *cp)
3313 {
3314 struct mgmt_pending_cmd *cmd;
3315 int err;
3316
3317 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3318 sizeof(*cp));
3319 if (!cmd)
3320 return -ENOMEM;
3321
3322 cmd->cmd_complete = addr_cmd_complete;
3323
3324 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3325 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3326 if (err < 0)
3327 mgmt_pending_remove(cmd);
3328
3329 return err;
3330 }
3331
3332 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3333 u16 len)
3334 {
3335 struct hci_conn *conn;
3336 struct mgmt_cp_pin_code_reply *cp = data;
3337 struct hci_cp_pin_code_reply reply;
3338 struct mgmt_pending_cmd *cmd;
3339 int err;
3340
3341 bt_dev_dbg(hdev, "sock %p", sk);
3342
3343 hci_dev_lock(hdev);
3344
3345 if (!hdev_is_powered(hdev)) {
3346 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3347 MGMT_STATUS_NOT_POWERED);
3348 goto failed;
3349 }
3350
3351 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3352 if (!conn) {
3353 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3354 MGMT_STATUS_NOT_CONNECTED);
3355 goto failed;
3356 }
3357
3358 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3359 struct mgmt_cp_pin_code_neg_reply ncp;
3360
3361 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3362
3363 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3364
3365 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3366 if (err >= 0)
3367 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3368 MGMT_STATUS_INVALID_PARAMS);
3369
3370 goto failed;
3371 }
3372
3373 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3374 if (!cmd) {
3375 err = -ENOMEM;
3376 goto failed;
3377 }
3378
3379 cmd->cmd_complete = addr_cmd_complete;
3380
3381 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3382 reply.pin_len = cp->pin_len;
3383 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3384
3385 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3386 if (err < 0)
3387 mgmt_pending_remove(cmd);
3388
3389 failed:
3390 hci_dev_unlock(hdev);
3391 return err;
3392 }
3393
3394 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3395 u16 len)
3396 {
3397 struct mgmt_cp_set_io_capability *cp = data;
3398
3399 bt_dev_dbg(hdev, "sock %p", sk);
3400
3401 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3402 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3403 MGMT_STATUS_INVALID_PARAMS);
3404
3405 hci_dev_lock(hdev);
3406
3407 hdev->io_capability = cp->io_capability;
3408
3409 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3410
3411 hci_dev_unlock(hdev);
3412
3413 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3414 NULL, 0);
3415 }
3416
3417 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3418 {
3419 struct hci_dev *hdev = conn->hdev;
3420 struct mgmt_pending_cmd *cmd;
3421
3422 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3423 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3424 continue;
3425
3426 if (cmd->user_data != conn)
3427 continue;
3428
3429 return cmd;
3430 }
3431
3432 return NULL;
3433 }
3434
3435 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3436 {
3437 struct mgmt_rp_pair_device rp;
3438 struct hci_conn *conn = cmd->user_data;
3439 int err;
3440
3441 bacpy(&rp.addr.bdaddr, &conn->dst);
3442 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3443
3444 err = mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_PAIR_DEVICE,
3445 status, &rp, sizeof(rp));
3446
3447 /* So we don't get further callbacks for this connection */
3448 conn->connect_cfm_cb = NULL;
3449 conn->security_cfm_cb = NULL;
3450 conn->disconn_cfm_cb = NULL;
3451
3452 hci_conn_drop(conn);
3453
3454 /* The device is paired so there is no need to remove
3455 * its connection parameters anymore.
3456 */
3457 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3458
3459 hci_conn_put(conn);
3460
3461 return err;
3462 }
3463
3464 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3465 {
3466 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3467 struct mgmt_pending_cmd *cmd;
3468
3469 cmd = find_pairing(conn);
3470 if (cmd) {
3471 cmd->cmd_complete(cmd, status);
3472 mgmt_pending_remove(cmd);
3473 }
3474 }
3475
3476 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3477 {
3478 struct mgmt_pending_cmd *cmd;
3479
3480 BT_DBG("status %u", status);
3481
3482 cmd = find_pairing(conn);
3483 if (!cmd) {
3484 BT_DBG("Unable to find a pending command");
3485 return;
3486 }
3487
3488 cmd->cmd_complete(cmd, mgmt_status(status));
3489 mgmt_pending_remove(cmd);
3490 }
3491
3492 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3493 {
3494 struct mgmt_pending_cmd *cmd;
3495
3496 BT_DBG("status %u", status);
3497
3498 if (!status)
3499 return;
3500
3501 cmd = find_pairing(conn);
3502 if (!cmd) {
3503 BT_DBG("Unable to find a pending command");
3504 return;
3505 }
3506
3507 cmd->cmd_complete(cmd, mgmt_status(status));
3508 mgmt_pending_remove(cmd);
3509 }
3510
3511 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3512 u16 len)
3513 {
3514 struct mgmt_cp_pair_device *cp = data;
3515 struct mgmt_rp_pair_device rp;
3516 struct mgmt_pending_cmd *cmd;
3517 u8 sec_level, auth_type;
3518 struct hci_conn *conn;
3519 int err;
3520
3521 bt_dev_dbg(hdev, "sock %p", sk);
3522
3523 memset(&rp, 0, sizeof(rp));
3524 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3525 rp.addr.type = cp->addr.type;
3526
3527 if (!bdaddr_type_is_valid(cp->addr.type))
3528 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3529 MGMT_STATUS_INVALID_PARAMS,
3530 &rp, sizeof(rp));
3531
3532 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3533 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3534 MGMT_STATUS_INVALID_PARAMS,
3535 &rp, sizeof(rp));
3536
3537 hci_dev_lock(hdev);
3538
3539 if (!hdev_is_powered(hdev)) {
3540 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3541 MGMT_STATUS_NOT_POWERED, &rp,
3542 sizeof(rp));
3543 goto unlock;
3544 }
3545
3546 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3547 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3548 MGMT_STATUS_ALREADY_PAIRED, &rp,
3549 sizeof(rp));
3550 goto unlock;
3551 }
3552
3553 sec_level = BT_SECURITY_MEDIUM;
3554 auth_type = HCI_AT_DEDICATED_BONDING;
3555
3556 if (cp->addr.type == BDADDR_BREDR) {
3557 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3558 auth_type, CONN_REASON_PAIR_DEVICE,
3559 HCI_ACL_CONN_TIMEOUT);
3560 } else {
3561 u8 addr_type = le_addr_type(cp->addr.type);
3562 struct hci_conn_params *p;
3563
3564 /* When pairing a new device, it is expected to remember
3565 * this device for future connections. Adding the connection
3566 * parameter information ahead of time allows tracking
3567 * of the peripheral preferred values and will speed up any
3568 * further connection establishment.
3569 *
3570 * If connection parameters already exist, then they
3571 * will be kept and this function does nothing.
3572 */
3573 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3574 if (!p) {
3575 err = -EIO;
3576 goto unlock;
3577 }
3578
3579 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3580 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3581
3582 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3583 sec_level, HCI_LE_CONN_TIMEOUT,
3584 CONN_REASON_PAIR_DEVICE);
3585 }
3586
3587 if (IS_ERR(conn)) {
3588 int status;
3589
3590 if (PTR_ERR(conn) == -EBUSY)
3591 status = MGMT_STATUS_BUSY;
3592 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3593 status = MGMT_STATUS_NOT_SUPPORTED;
3594 else if (PTR_ERR(conn) == -ECONNREFUSED)
3595 status = MGMT_STATUS_REJECTED;
3596 else
3597 status = MGMT_STATUS_CONNECT_FAILED;
3598
3599 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3600 status, &rp, sizeof(rp));
3601 goto unlock;
3602 }
3603
3604 if (conn->connect_cfm_cb) {
3605 hci_conn_drop(conn);
3606 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3607 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3608 goto unlock;
3609 }
3610
3611 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3612 if (!cmd) {
3613 err = -ENOMEM;
3614 hci_conn_drop(conn);
3615 goto unlock;
3616 }
3617
3618 cmd->cmd_complete = pairing_complete;
3619
3620 /* For LE, just connecting isn't a proof that the pairing finished */
3621 if (cp->addr.type == BDADDR_BREDR) {
3622 conn->connect_cfm_cb = pairing_complete_cb;
3623 conn->security_cfm_cb = pairing_complete_cb;
3624 conn->disconn_cfm_cb = pairing_complete_cb;
3625 } else {
3626 conn->connect_cfm_cb = le_pairing_complete_cb;
3627 conn->security_cfm_cb = le_pairing_complete_cb;
3628 conn->disconn_cfm_cb = le_pairing_complete_cb;
3629 }
3630
3631 conn->io_capability = cp->io_cap;
3632 cmd->user_data = hci_conn_get(conn);
3633
3634 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3635 hci_conn_security(conn, sec_level, auth_type, true)) {
3636 cmd->cmd_complete(cmd, 0);
3637 mgmt_pending_remove(cmd);
3638 }
3639
3640 err = 0;
3641
3642 unlock:
3643 hci_dev_unlock(hdev);
3644 return err;
3645 }
3646
3647 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3648 u16 len)
3649 {
3650 struct mgmt_addr_info *addr = data;
3651 struct mgmt_pending_cmd *cmd;
3652 struct hci_conn *conn;
3653 int err;
3654
3655 bt_dev_dbg(hdev, "sock %p", sk);
3656
3657 hci_dev_lock(hdev);
3658
3659 if (!hdev_is_powered(hdev)) {
3660 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3661 MGMT_STATUS_NOT_POWERED);
3662 goto unlock;
3663 }
3664
3665 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3666 if (!cmd) {
3667 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3668 MGMT_STATUS_INVALID_PARAMS);
3669 goto unlock;
3670 }
3671
3672 conn = cmd->user_data;
3673
3674 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3675 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3676 MGMT_STATUS_INVALID_PARAMS);
3677 goto unlock;
3678 }
3679
3680 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3681 mgmt_pending_remove(cmd);
3682
3683 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3684 addr, sizeof(*addr));
3685
3686 /* Since user doesn't want to proceed with the connection, abort any
3687 * ongoing pairing and then terminate the link if it was created
3688 * because of the pair device action.
3689 */
3690 if (addr->type == BDADDR_BREDR)
3691 hci_remove_link_key(hdev, &addr->bdaddr);
3692 else
3693 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3694 le_addr_type(addr->type));
3695
3696 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3697 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3698
3699 unlock:
3700 hci_dev_unlock(hdev);
3701 return err;
3702 }
3703
3704 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3705 struct mgmt_addr_info *addr, u16 mgmt_op,
3706 u16 hci_op, __le32 passkey)
3707 {
3708 struct mgmt_pending_cmd *cmd;
3709 struct hci_conn *conn;
3710 int err;
3711
3712 hci_dev_lock(hdev);
3713
3714 if (!hdev_is_powered(hdev)) {
3715 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3716 MGMT_STATUS_NOT_POWERED, addr,
3717 sizeof(*addr));
3718 goto done;
3719 }
3720
3721 if (addr->type == BDADDR_BREDR)
3722 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3723 else
3724 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3725 le_addr_type(addr->type));
3726
3727 if (!conn) {
3728 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3729 MGMT_STATUS_NOT_CONNECTED, addr,
3730 sizeof(*addr));
3731 goto done;
3732 }
3733
3734 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3735 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3736 if (!err)
3737 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3738 MGMT_STATUS_SUCCESS, addr,
3739 sizeof(*addr));
3740 else
3741 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3742 MGMT_STATUS_FAILED, addr,
3743 sizeof(*addr));
3744
3745 goto done;
3746 }
3747
3748 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3749 if (!cmd) {
3750 err = -ENOMEM;
3751 goto done;
3752 }
3753
3754 cmd->cmd_complete = addr_cmd_complete;
3755
3756 /* Continue with pairing via HCI */
3757 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3758 struct hci_cp_user_passkey_reply cp;
3759
3760 bacpy(&cp.bdaddr, &addr->bdaddr);
3761 cp.passkey = passkey;
3762 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3763 } else
3764 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3765 &addr->bdaddr);
3766
3767 if (err < 0)
3768 mgmt_pending_remove(cmd);
3769
3770 done:
3771 hci_dev_unlock(hdev);
3772 return err;
3773 }
3774
3775 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3776 void *data, u16 len)
3777 {
3778 struct mgmt_cp_pin_code_neg_reply *cp = data;
3779
3780 bt_dev_dbg(hdev, "sock %p", sk);
3781
3782 return user_pairing_resp(sk, hdev, &cp->addr,
3783 MGMT_OP_PIN_CODE_NEG_REPLY,
3784 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3785 }
3786
3787 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3788 u16 len)
3789 {
3790 struct mgmt_cp_user_confirm_reply *cp = data;
3791
3792 bt_dev_dbg(hdev, "sock %p", sk);
3793
3794 if (len != sizeof(*cp))
3795 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3796 MGMT_STATUS_INVALID_PARAMS);
3797
3798 return user_pairing_resp(sk, hdev, &cp->addr,
3799 MGMT_OP_USER_CONFIRM_REPLY,
3800 HCI_OP_USER_CONFIRM_REPLY, 0);
3801 }
3802
3803 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3804 void *data, u16 len)
3805 {
3806 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3807
3808 bt_dev_dbg(hdev, "sock %p", sk);
3809
3810 return user_pairing_resp(sk, hdev, &cp->addr,
3811 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3812 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3813 }
3814
3815 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3816 u16 len)
3817 {
3818 struct mgmt_cp_user_passkey_reply *cp = data;
3819
3820 bt_dev_dbg(hdev, "sock %p", sk);
3821
3822 return user_pairing_resp(sk, hdev, &cp->addr,
3823 MGMT_OP_USER_PASSKEY_REPLY,
3824 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3825 }
3826
3827 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3828 void *data, u16 len)
3829 {
3830 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3831
3832 bt_dev_dbg(hdev, "sock %p", sk);
3833
3834 return user_pairing_resp(sk, hdev, &cp->addr,
3835 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3836 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3837 }
3838
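/* Expire the current advertising instance if its data depends on one
 * of the given flags (e.g. local name or appearance) and schedule the
 * next instance, so stale data is not left on air.
 */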
3839 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3840 {
3841 struct adv_info *adv_instance;
3842
3843 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3844 if (!adv_instance)
3845 return 0;
3846
3847 	/* Stop if the current instance doesn't need to be changed */
3848 if (!(adv_instance->flags & flags))
3849 return 0;
3850
3851 cancel_adv_timeout(hdev);
3852
3853 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3854 if (!adv_instance)
3855 return 0;
3856
3857 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3858
3859 return 0;
3860 }
3861
3862 static int name_changed_sync(struct hci_dev *hdev, void *data)
3863 {
3864 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3865 }
3866
3867 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3868 {
3869 struct mgmt_pending_cmd *cmd = data;
3870 struct mgmt_cp_set_local_name *cp = cmd->param;
3871 u8 status = mgmt_status(err);
3872
3873 bt_dev_dbg(hdev, "err %d", err);
3874
3875 if (err == -ECANCELED ||
3876 cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3877 return;
3878
3879 if (status) {
3880 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3881 status);
3882 } else {
3883 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3884 cp, sizeof(*cp));
3885
3886 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3887 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3888 }
3889
3890 mgmt_pending_remove(cmd);
3891 }
3892
3893 static int set_name_sync(struct hci_dev *hdev, void *data)
3894 {
3895 struct mgmt_pending_cmd *cmd = data;
3896 struct mgmt_cp_set_local_name *cp = cmd->param;
3897
3898 if (lmp_bredr_capable(hdev)) {
3899 hci_update_name_sync(hdev, cp->name);
3900 hci_update_eir_sync(hdev);
3901 }
3902
3903 	/* The name is stored in the scan response data, so there is no
3904 	 * need to update the advertising data here.
3905 */
3906 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3907 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3908
3909 return 0;
3910 }
3911
3912 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3913 u16 len)
3914 {
3915 struct mgmt_cp_set_local_name *cp = data;
3916 struct mgmt_pending_cmd *cmd;
3917 int err;
3918
3919 bt_dev_dbg(hdev, "sock %p", sk);
3920
3921 hci_dev_lock(hdev);
3922
3923 	/* If the old values are the same as the new ones, just return a
3924 * direct command complete event.
3925 */
3926 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3927 !memcmp(hdev->short_name, cp->short_name,
3928 sizeof(hdev->short_name))) {
3929 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3930 data, len);
3931 goto failed;
3932 }
3933
3934 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3935
3936 if (!hdev_is_powered(hdev)) {
3937 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3938
3939 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3940 data, len);
3941 if (err < 0)
3942 goto failed;
3943
3944 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3945 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3946 ext_info_changed(hdev, sk);
3947
3948 goto failed;
3949 }
3950
3951 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3952 if (!cmd)
3953 err = -ENOMEM;
3954 else
3955 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3956 set_name_complete);
3957
3958 if (err < 0) {
3959 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3960 MGMT_STATUS_FAILED);
3961
3962 if (cmd)
3963 mgmt_pending_remove(cmd);
3964
3965 goto failed;
3966 }
3967
3968 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3969
3970 failed:
3971 hci_dev_unlock(hdev);
3972 return err;
3973 }
3974
3975 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3976 {
3977 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3978 }
3979
3980 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3981 u16 len)
3982 {
3983 struct mgmt_cp_set_appearance *cp = data;
3984 u16 appearance;
3985 int err;
3986
3987 bt_dev_dbg(hdev, "sock %p", sk);
3988
3989 if (!lmp_le_capable(hdev))
3990 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3991 MGMT_STATUS_NOT_SUPPORTED);
3992
3993 appearance = le16_to_cpu(cp->appearance);
3994
3995 hci_dev_lock(hdev);
3996
3997 if (hdev->appearance != appearance) {
3998 hdev->appearance = appearance;
3999
4000 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
4001 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
4002 NULL);
4003
4004 ext_info_changed(hdev, sk);
4005 }
4006
4007 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
4008 0);
4009
4010 hci_dev_unlock(hdev);
4011
4012 return err;
4013 }
4014
4015 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4016 void *data, u16 len)
4017 {
4018 struct mgmt_rp_get_phy_configuration rp;
4019
4020 bt_dev_dbg(hdev, "sock %p", sk);
4021
4022 hci_dev_lock(hdev);
4023
4024 memset(&rp, 0, sizeof(rp));
4025
4026 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
4027 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
4028 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
4029
4030 hci_dev_unlock(hdev);
4031
4032 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
4033 &rp, sizeof(rp));
4034 }
4035
4036 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
4037 {
4038 struct mgmt_ev_phy_configuration_changed ev;
4039
4040 memset(&ev, 0, sizeof(ev));
4041
4042 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
4043
4044 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
4045 sizeof(ev), skip);
4046 }
4047
4048 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
4049 {
4050 struct mgmt_pending_cmd *cmd = data;
4051 struct sk_buff *skb = cmd->skb;
4052 u8 status = mgmt_status(err);
4053
4054 if (err == -ECANCELED ||
4055 cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
4056 return;
4057
4058 if (!status) {
4059 if (!skb)
4060 status = MGMT_STATUS_FAILED;
4061 else if (IS_ERR(skb))
4062 status = mgmt_status(PTR_ERR(skb));
4063 else
4064 status = mgmt_status(skb->data[0]);
4065 }
4066
4067 bt_dev_dbg(hdev, "status %d", status);
4068
4069 if (status) {
4070 mgmt_cmd_status(cmd->sk, hdev->id,
4071 MGMT_OP_SET_PHY_CONFIGURATION, status);
4072 } else {
4073 mgmt_cmd_complete(cmd->sk, hdev->id,
4074 MGMT_OP_SET_PHY_CONFIGURATION, 0,
4075 NULL, 0);
4076
4077 mgmt_phy_configuration_changed(hdev, cmd->sk);
4078 }
4079
4080 if (skb && !IS_ERR(skb))
4081 kfree_skb(skb);
4082
4083 mgmt_pending_remove(cmd);
4084 }
4085
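/* Translate the selected MGMT PHYs into HCI LE Set Default PHY
 * parameters: all_phys bit 0 (0x01) means "no TX preference" and
 * bit 1 (0x02) means "no RX preference", used when no LE TX or RX
 * PHYs were selected at all.
 */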
4086 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4087 {
4088 struct mgmt_pending_cmd *cmd = data;
4089 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4090 struct hci_cp_le_set_default_phy cp_phy;
4091 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4092
4093 memset(&cp_phy, 0, sizeof(cp_phy));
4094
4095 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4096 cp_phy.all_phys |= 0x01;
4097
4098 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4099 cp_phy.all_phys |= 0x02;
4100
4101 if (selected_phys & MGMT_PHY_LE_1M_TX)
4102 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4103
4104 if (selected_phys & MGMT_PHY_LE_2M_TX)
4105 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4106
4107 if (selected_phys & MGMT_PHY_LE_CODED_TX)
4108 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4109
4110 if (selected_phys & MGMT_PHY_LE_1M_RX)
4111 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4112
4113 if (selected_phys & MGMT_PHY_LE_2M_RX)
4114 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4115
4116 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4117 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4118
4119 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4120 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4121
4122 return 0;
4123 }
4124
4125 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4126 void *data, u16 len)
4127 {
4128 struct mgmt_cp_set_phy_configuration *cp = data;
4129 struct mgmt_pending_cmd *cmd;
4130 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4131 u16 pkt_type = (HCI_DH1 | HCI_DM1);
4132 bool changed = false;
4133 int err;
4134
4135 bt_dev_dbg(hdev, "sock %p", sk);
4136
4137 configurable_phys = get_configurable_phys(hdev);
4138 supported_phys = get_supported_phys(hdev);
4139 selected_phys = __le32_to_cpu(cp->selected_phys);
4140
4141 if (selected_phys & ~supported_phys)
4142 return mgmt_cmd_status(sk, hdev->id,
4143 MGMT_OP_SET_PHY_CONFIGURATION,
4144 MGMT_STATUS_INVALID_PARAMS);
4145
4146 unconfigure_phys = supported_phys & ~configurable_phys;
4147
4148 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4149 return mgmt_cmd_status(sk, hdev->id,
4150 MGMT_OP_SET_PHY_CONFIGURATION,
4151 MGMT_STATUS_INVALID_PARAMS);
4152
4153 if (selected_phys == get_selected_phys(hdev))
4154 return mgmt_cmd_complete(sk, hdev->id,
4155 MGMT_OP_SET_PHY_CONFIGURATION,
4156 0, NULL, 0);
4157
4158 hci_dev_lock(hdev);
4159
4160 if (!hdev_is_powered(hdev)) {
4161 err = mgmt_cmd_status(sk, hdev->id,
4162 MGMT_OP_SET_PHY_CONFIGURATION,
4163 MGMT_STATUS_REJECTED);
4164 goto unlock;
4165 }
4166
4167 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4168 err = mgmt_cmd_status(sk, hdev->id,
4169 MGMT_OP_SET_PHY_CONFIGURATION,
4170 MGMT_STATUS_BUSY);
4171 goto unlock;
4172 }
4173
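	/* The EDR bits in pkt_type (2-DHx/3-DHx) have inverted "shall not
	 * be used" semantics, so selecting an EDR PHY clears the
	 * corresponding bit and deselecting it sets the bit.
	 */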
4174 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4175 pkt_type |= (HCI_DH3 | HCI_DM3);
4176 else
4177 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4178
4179 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4180 pkt_type |= (HCI_DH5 | HCI_DM5);
4181 else
4182 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4183
4184 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4185 pkt_type &= ~HCI_2DH1;
4186 else
4187 pkt_type |= HCI_2DH1;
4188
4189 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4190 pkt_type &= ~HCI_2DH3;
4191 else
4192 pkt_type |= HCI_2DH3;
4193
4194 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4195 pkt_type &= ~HCI_2DH5;
4196 else
4197 pkt_type |= HCI_2DH5;
4198
4199 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4200 pkt_type &= ~HCI_3DH1;
4201 else
4202 pkt_type |= HCI_3DH1;
4203
4204 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4205 pkt_type &= ~HCI_3DH3;
4206 else
4207 pkt_type |= HCI_3DH3;
4208
4209 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4210 pkt_type &= ~HCI_3DH5;
4211 else
4212 pkt_type |= HCI_3DH5;
4213
4214 if (pkt_type != hdev->pkt_type) {
4215 hdev->pkt_type = pkt_type;
4216 changed = true;
4217 }
4218
4219 if ((selected_phys & MGMT_PHY_LE_MASK) ==
4220 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4221 if (changed)
4222 mgmt_phy_configuration_changed(hdev, sk);
4223
4224 err = mgmt_cmd_complete(sk, hdev->id,
4225 MGMT_OP_SET_PHY_CONFIGURATION,
4226 0, NULL, 0);
4227
4228 goto unlock;
4229 }
4230
4231 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4232 len);
4233 if (!cmd)
4234 err = -ENOMEM;
4235 else
4236 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4237 set_default_phy_complete);
4238
4239 if (err < 0) {
4240 err = mgmt_cmd_status(sk, hdev->id,
4241 MGMT_OP_SET_PHY_CONFIGURATION,
4242 MGMT_STATUS_FAILED);
4243
4244 if (cmd)
4245 mgmt_pending_remove(cmd);
4246 }
4247
4248 unlock:
4249 hci_dev_unlock(hdev);
4250
4251 return err;
4252 }
4253
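/* Replace the entire list of blocked keys: the old list is cleared
 * and the new entries are added while holding the device lock.
 */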
4254 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4255 u16 len)
4256 {
4257 int err = MGMT_STATUS_SUCCESS;
4258 struct mgmt_cp_set_blocked_keys *keys = data;
4259 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4260 sizeof(struct mgmt_blocked_key_info));
4261 u16 key_count, expected_len;
4262 int i;
4263
4264 bt_dev_dbg(hdev, "sock %p", sk);
4265
4266 key_count = __le16_to_cpu(keys->key_count);
4267 if (key_count > max_key_count) {
4268 bt_dev_err(hdev, "too big key_count value %u", key_count);
4269 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4270 MGMT_STATUS_INVALID_PARAMS);
4271 }
4272
4273 expected_len = struct_size(keys, keys, key_count);
4274 if (expected_len != len) {
4275 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4276 expected_len, len);
4277 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4278 MGMT_STATUS_INVALID_PARAMS);
4279 }
4280
4281 hci_dev_lock(hdev);
4282
4283 hci_blocked_keys_clear(hdev);
4284
4285 for (i = 0; i < key_count; ++i) {
4286 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4287
4288 if (!b) {
4289 err = MGMT_STATUS_NO_RESOURCES;
4290 break;
4291 }
4292
4293 b->type = keys->keys[i].type;
4294 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4295 list_add_rcu(&b->list, &hdev->blocked_keys);
4296 }
4297 hci_dev_unlock(hdev);
4298
4299 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4300 err, NULL, 0);
4301 }
4302
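/* The wideband speech setting can only be changed while the
 * controller is powered off; toggling it on a powered controller is
 * rejected.
 */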
4303 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4304 void *data, u16 len)
4305 {
4306 struct mgmt_mode *cp = data;
4307 int err;
4308 bool changed = false;
4309
4310 bt_dev_dbg(hdev, "sock %p", sk);
4311
4312 if (!hci_test_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED))
4313 return mgmt_cmd_status(sk, hdev->id,
4314 MGMT_OP_SET_WIDEBAND_SPEECH,
4315 MGMT_STATUS_NOT_SUPPORTED);
4316
4317 if (cp->val != 0x00 && cp->val != 0x01)
4318 return mgmt_cmd_status(sk, hdev->id,
4319 MGMT_OP_SET_WIDEBAND_SPEECH,
4320 MGMT_STATUS_INVALID_PARAMS);
4321
4322 hci_dev_lock(hdev);
4323
4324 if (hdev_is_powered(hdev) &&
4325 !!cp->val != hci_dev_test_flag(hdev,
4326 HCI_WIDEBAND_SPEECH_ENABLED)) {
4327 err = mgmt_cmd_status(sk, hdev->id,
4328 MGMT_OP_SET_WIDEBAND_SPEECH,
4329 MGMT_STATUS_REJECTED);
4330 goto unlock;
4331 }
4332
4333 if (cp->val)
4334 changed = !hci_dev_test_and_set_flag(hdev,
4335 HCI_WIDEBAND_SPEECH_ENABLED);
4336 else
4337 changed = hci_dev_test_and_clear_flag(hdev,
4338 HCI_WIDEBAND_SPEECH_ENABLED);
4339
4340 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4341 if (err < 0)
4342 goto unlock;
4343
4344 if (changed)
4345 err = new_settings(hdev, sk);
4346
4347 unlock:
4348 hci_dev_unlock(hdev);
4349 return err;
4350 }
4351
4352 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4353 void *data, u16 data_len)
4354 {
4355 char buf[20];
4356 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4357 u16 cap_len = 0;
4358 u8 flags = 0;
4359 u8 tx_power_range[2];
4360
4361 bt_dev_dbg(hdev, "sock %p", sk);
4362
4363 memset(&buf, 0, sizeof(buf));
4364
4365 hci_dev_lock(hdev);
4366
4367 	/* When the Read Simple Pairing Options command is supported, remote
4368 	 * public key validation is supported.
4369 *
4370 * Alternatively, when Microsoft extensions are available, they can
4371 * indicate support for public key validation as well.
4372 */
4373 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4374 flags |= 0x01; /* Remote public key validation (BR/EDR) */
4375
4376 flags |= 0x02; /* Remote public key validation (LE) */
4377
4378 	/* When the Read Encryption Key Size command is supported, the
4379 	 * encryption key size is enforced.
4380 */
4381 if (hdev->commands[20] & 0x10)
4382 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
4383
4384 flags |= 0x08; /* Encryption key size enforcement (LE) */
4385
4386 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4387 &flags, 1);
4388
4389 	/* When the Read Simple Pairing Options command is supported, the
4390 	 * maximum encryption key size is also provided.
4391 */
4392 if (hdev->commands[41] & 0x08)
4393 cap_len = eir_append_le16(rp->cap, cap_len,
4394 MGMT_CAP_MAX_ENC_KEY_SIZE,
4395 hdev->max_enc_key_size);
4396
4397 cap_len = eir_append_le16(rp->cap, cap_len,
4398 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4399 SMP_MAX_ENC_KEY_SIZE);
4400
4401 /* Append the min/max LE tx power parameters if we were able to fetch
4402 	 * them from the controller.
4403 */
4404 if (hdev->commands[38] & 0x80) {
4405 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4406 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4407 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4408 tx_power_range, 2);
4409 }
4410
4411 rp->cap_len = cpu_to_le16(cap_len);
4412
4413 hci_dev_unlock(hdev);
4414
4415 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4416 rp, sizeof(*rp) + cap_len);
4417 }
4418
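/* The experimental feature UUIDs below are stored in little-endian
 * byte order, i.e. reversed relative to the string form quoted above
 * each array, matching the mgmt wire format.
 */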
4419 #ifdef CONFIG_BT_FEATURE_DEBUG
4420 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4421 static const u8 debug_uuid[16] = {
4422 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4423 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4424 };
4425 #endif
4426
4427 /* 330859bc-7506-492d-9370-9a6f0614037f */
4428 static const u8 quality_report_uuid[16] = {
4429 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4430 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4431 };
4432
4433 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4434 static const u8 offload_codecs_uuid[16] = {
4435 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4436 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4437 };
4438
4439 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4440 static const u8 le_simultaneous_roles_uuid[16] = {
4441 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4442 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4443 };
4444
4445 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4446 static const u8 iso_socket_uuid[16] = {
4447 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4448 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4449 };
4450
4451 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4452 static const u8 mgmt_mesh_uuid[16] = {
4453 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4454 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4455 };
4456
4457 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4458 void *data, u16 data_len)
4459 {
4460 struct mgmt_rp_read_exp_features_info *rp;
4461 size_t len;
4462 u16 idx = 0;
4463 u32 flags;
4464 int status;
4465
4466 bt_dev_dbg(hdev, "sock %p", sk);
4467
4468 /* Enough space for 7 features */
4469 len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4470 rp = kzalloc(len, GFP_KERNEL);
4471 if (!rp)
4472 return -ENOMEM;
4473
4474 #ifdef CONFIG_BT_FEATURE_DEBUG
4475 if (!hdev) {
4476 flags = bt_dbg_get() ? BIT(0) : 0;
4477
4478 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4479 rp->features[idx].flags = cpu_to_le32(flags);
4480 idx++;
4481 }
4482 #endif
4483
4484 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4485 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4486 flags = BIT(0);
4487 else
4488 flags = 0;
4489
4490 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4491 rp->features[idx].flags = cpu_to_le32(flags);
4492 idx++;
4493 }
4494
4495 if (hdev && (aosp_has_quality_report(hdev) ||
4496 hdev->set_quality_report)) {
4497 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4498 flags = BIT(0);
4499 else
4500 flags = 0;
4501
4502 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4503 rp->features[idx].flags = cpu_to_le32(flags);
4504 idx++;
4505 }
4506
4507 if (hdev && hdev->get_data_path_id) {
4508 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4509 flags = BIT(0);
4510 else
4511 flags = 0;
4512
4513 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4514 rp->features[idx].flags = cpu_to_le32(flags);
4515 idx++;
4516 }
4517
4518 if (IS_ENABLED(CONFIG_BT_LE)) {
4519 flags = iso_inited() ? BIT(0) : 0;
4520 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4521 rp->features[idx].flags = cpu_to_le32(flags);
4522 idx++;
4523 }
4524
4525 if (hdev && lmp_le_capable(hdev)) {
4526 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4527 flags = BIT(0);
4528 else
4529 flags = 0;
4530
4531 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4532 rp->features[idx].flags = cpu_to_le32(flags);
4533 idx++;
4534 }
4535
4536 rp->feature_count = cpu_to_le16(idx);
4537
4538 /* After reading the experimental features information, enable
4539 	 * the events to update the client on any future change.
4540 */
4541 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4542
4543 status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4544 MGMT_OP_READ_EXP_FEATURES_INFO,
4545 0, rp, sizeof(*rp) + (20 * idx));
4546
4547 kfree(rp);
4548 return status;
4549 }
4550
4551 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4552 bool enabled, struct sock *skip)
4553 {
4554 struct mgmt_ev_exp_feature_changed ev;
4555
4556 memset(&ev, 0, sizeof(ev));
4557 memcpy(ev.uuid, uuid, 16);
4558 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4559
4560 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4561 &ev, sizeof(ev),
4562 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4563 }
4564
4565 #define EXP_FEAT(_uuid, _set_func) \
4566 { \
4567 .uuid = _uuid, \
4568 .set_func = _set_func, \
4569 }
4570
4571 /* The zero key UUID is special. Multiple exp features are set through it. */
4572 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4573 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4574 {
4575 struct mgmt_rp_set_exp_feature rp;
4576
4577 memset(rp.uuid, 0, 16);
4578 rp.flags = cpu_to_le32(0);
4579
4580 #ifdef CONFIG_BT_FEATURE_DEBUG
4581 if (!hdev) {
4582 bool changed = bt_dbg_get();
4583
4584 bt_dbg_set(false);
4585
4586 if (changed)
4587 exp_feature_changed(NULL, ZERO_KEY, false, sk);
4588 }
4589 #endif
4590
4591 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4592
4593 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4594 MGMT_OP_SET_EXP_FEATURE, 0,
4595 &rp, sizeof(rp));
4596 }
4597
4598 #ifdef CONFIG_BT_FEATURE_DEBUG
4599 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4600 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4601 {
4602 struct mgmt_rp_set_exp_feature rp;
4603
4604 bool val, changed;
4605 int err;
4606
4607 	/* This command must be used with the non-controller index */
4608 if (hdev)
4609 return mgmt_cmd_status(sk, hdev->id,
4610 MGMT_OP_SET_EXP_FEATURE,
4611 MGMT_STATUS_INVALID_INDEX);
4612
4613 /* Parameters are limited to a single octet */
4614 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4615 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4616 MGMT_OP_SET_EXP_FEATURE,
4617 MGMT_STATUS_INVALID_PARAMS);
4618
4619 /* Only boolean on/off is supported */
4620 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4621 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4622 MGMT_OP_SET_EXP_FEATURE,
4623 MGMT_STATUS_INVALID_PARAMS);
4624
4625 val = !!cp->param[0];
4626 changed = val ? !bt_dbg_get() : bt_dbg_get();
4627 bt_dbg_set(val);
4628
4629 memcpy(rp.uuid, debug_uuid, 16);
4630 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4631
4632 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4633
4634 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4635 MGMT_OP_SET_EXP_FEATURE, 0,
4636 &rp, sizeof(rp));
4637
4638 if (changed)
4639 exp_feature_changed(hdev, debug_uuid, val, sk);
4640
4641 return err;
4642 }
4643 #endif
4644
4645 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4646 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4647 {
4648 struct mgmt_rp_set_exp_feature rp;
4649 bool val, changed;
4650 int err;
4651
4652 	/* This command must be used with a controller index */
4653 if (!hdev)
4654 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4655 MGMT_OP_SET_EXP_FEATURE,
4656 MGMT_STATUS_INVALID_INDEX);
4657
4658 /* Parameters are limited to a single octet */
4659 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4660 return mgmt_cmd_status(sk, hdev->id,
4661 MGMT_OP_SET_EXP_FEATURE,
4662 MGMT_STATUS_INVALID_PARAMS);
4663
4664 /* Only boolean on/off is supported */
4665 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4666 return mgmt_cmd_status(sk, hdev->id,
4667 MGMT_OP_SET_EXP_FEATURE,
4668 MGMT_STATUS_INVALID_PARAMS);
4669
4670 val = !!cp->param[0];
4671
4672 if (val) {
4673 changed = !hci_dev_test_and_set_flag(hdev,
4674 HCI_MESH_EXPERIMENTAL);
4675 } else {
4676 hci_dev_clear_flag(hdev, HCI_MESH);
4677 changed = hci_dev_test_and_clear_flag(hdev,
4678 HCI_MESH_EXPERIMENTAL);
4679 }
4680
4681 memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4682 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4683
4684 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4685
4686 err = mgmt_cmd_complete(sk, hdev->id,
4687 MGMT_OP_SET_EXP_FEATURE, 0,
4688 &rp, sizeof(rp));
4689
4690 if (changed)
4691 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4692
4693 return err;
4694 }
4695
4696 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4697 struct mgmt_cp_set_exp_feature *cp,
4698 u16 data_len)
4699 {
4700 struct mgmt_rp_set_exp_feature rp;
4701 bool val, changed;
4702 int err;
4703
4704 	/* This command must be used with a valid controller index */
4705 if (!hdev)
4706 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4707 MGMT_OP_SET_EXP_FEATURE,
4708 MGMT_STATUS_INVALID_INDEX);
4709
4710 /* Parameters are limited to a single octet */
4711 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4712 return mgmt_cmd_status(sk, hdev->id,
4713 MGMT_OP_SET_EXP_FEATURE,
4714 MGMT_STATUS_INVALID_PARAMS);
4715
4716 /* Only boolean on/off is supported */
4717 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4718 return mgmt_cmd_status(sk, hdev->id,
4719 MGMT_OP_SET_EXP_FEATURE,
4720 MGMT_STATUS_INVALID_PARAMS);
4721
4722 hci_req_sync_lock(hdev);
4723
4724 val = !!cp->param[0];
4725 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4726
4727 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4728 err = mgmt_cmd_status(sk, hdev->id,
4729 MGMT_OP_SET_EXP_FEATURE,
4730 MGMT_STATUS_NOT_SUPPORTED);
4731 goto unlock_quality_report;
4732 }
4733
4734 if (changed) {
4735 if (hdev->set_quality_report)
4736 err = hdev->set_quality_report(hdev, val);
4737 else
4738 err = aosp_set_quality_report(hdev, val);
4739
4740 if (err) {
4741 err = mgmt_cmd_status(sk, hdev->id,
4742 MGMT_OP_SET_EXP_FEATURE,
4743 MGMT_STATUS_FAILED);
4744 goto unlock_quality_report;
4745 }
4746
4747 if (val)
4748 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4749 else
4750 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4751 }
4752
4753 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4754
4755 memcpy(rp.uuid, quality_report_uuid, 16);
4756 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4757 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4758
4759 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4760 &rp, sizeof(rp));
4761
4762 if (changed)
4763 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4764
4765 unlock_quality_report:
4766 hci_req_sync_unlock(hdev);
4767 return err;
4768 }
4769
4770 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4771 struct mgmt_cp_set_exp_feature *cp,
4772 u16 data_len)
4773 {
4774 bool val, changed;
4775 int err;
4776 struct mgmt_rp_set_exp_feature rp;
4777
4778 	/* This command must be used with a valid controller index */
4779 if (!hdev)
4780 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4781 MGMT_OP_SET_EXP_FEATURE,
4782 MGMT_STATUS_INVALID_INDEX);
4783
4784 /* Parameters are limited to a single octet */
4785 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4786 return mgmt_cmd_status(sk, hdev->id,
4787 MGMT_OP_SET_EXP_FEATURE,
4788 MGMT_STATUS_INVALID_PARAMS);
4789
4790 /* Only boolean on/off is supported */
4791 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4792 return mgmt_cmd_status(sk, hdev->id,
4793 MGMT_OP_SET_EXP_FEATURE,
4794 MGMT_STATUS_INVALID_PARAMS);
4795
4796 val = !!cp->param[0];
4797 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4798
4799 if (!hdev->get_data_path_id) {
4800 return mgmt_cmd_status(sk, hdev->id,
4801 MGMT_OP_SET_EXP_FEATURE,
4802 MGMT_STATUS_NOT_SUPPORTED);
4803 }
4804
4805 if (changed) {
4806 if (val)
4807 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4808 else
4809 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4810 }
4811
4812 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4813 val, changed);
4814
4815 memcpy(rp.uuid, offload_codecs_uuid, 16);
4816 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4817 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4818 err = mgmt_cmd_complete(sk, hdev->id,
4819 MGMT_OP_SET_EXP_FEATURE, 0,
4820 &rp, sizeof(rp));
4821
4822 if (changed)
4823 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4824
4825 return err;
4826 }
4827
4828 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4829 struct mgmt_cp_set_exp_feature *cp,
4830 u16 data_len)
4831 {
4832 bool val, changed;
4833 int err;
4834 struct mgmt_rp_set_exp_feature rp;
4835
4836 	/* This command must be used with a valid controller index */
4837 if (!hdev)
4838 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4839 MGMT_OP_SET_EXP_FEATURE,
4840 MGMT_STATUS_INVALID_INDEX);
4841
4842 /* Parameters are limited to a single octet */
4843 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4844 return mgmt_cmd_status(sk, hdev->id,
4845 MGMT_OP_SET_EXP_FEATURE,
4846 MGMT_STATUS_INVALID_PARAMS);
4847
4848 /* Only boolean on/off is supported */
4849 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4850 return mgmt_cmd_status(sk, hdev->id,
4851 MGMT_OP_SET_EXP_FEATURE,
4852 MGMT_STATUS_INVALID_PARAMS);
4853
4854 val = !!cp->param[0];
4855 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4856
4857 if (!hci_dev_le_state_simultaneous(hdev)) {
4858 return mgmt_cmd_status(sk, hdev->id,
4859 MGMT_OP_SET_EXP_FEATURE,
4860 MGMT_STATUS_NOT_SUPPORTED);
4861 }
4862
4863 if (changed) {
4864 if (val)
4865 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4866 else
4867 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4868 }
4869
4870 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4871 val, changed);
4872
4873 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4874 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4875 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4876 err = mgmt_cmd_complete(sk, hdev->id,
4877 MGMT_OP_SET_EXP_FEATURE, 0,
4878 &rp, sizeof(rp));
4879
4880 if (changed)
4881 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4882
4883 return err;
4884 }
4885
4886 #ifdef CONFIG_BT_LE
4887 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4888 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4889 {
4890 struct mgmt_rp_set_exp_feature rp;
4891 bool val, changed = false;
4892 int err;
4893
4894 	/* This command must be used with the non-controller index */
4895 if (hdev)
4896 return mgmt_cmd_status(sk, hdev->id,
4897 MGMT_OP_SET_EXP_FEATURE,
4898 MGMT_STATUS_INVALID_INDEX);
4899
4900 /* Parameters are limited to a single octet */
4901 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4902 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4903 MGMT_OP_SET_EXP_FEATURE,
4904 MGMT_STATUS_INVALID_PARAMS);
4905
4906 /* Only boolean on/off is supported */
4907 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4908 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4909 MGMT_OP_SET_EXP_FEATURE,
4910 MGMT_STATUS_INVALID_PARAMS);
4911
4912 	val = !!cp->param[0];
4913 if (val)
4914 err = iso_init();
4915 else
4916 err = iso_exit();
4917
4918 if (!err)
4919 changed = true;
4920
4921 memcpy(rp.uuid, iso_socket_uuid, 16);
4922 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4923
4924 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4925
4926 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4927 MGMT_OP_SET_EXP_FEATURE, 0,
4928 &rp, sizeof(rp));
4929
4930 if (changed)
4931 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4932
4933 return err;
4934 }
4935 #endif
4936
4937 static const struct mgmt_exp_feature {
4938 const u8 *uuid;
4939 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4940 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4941 } exp_features[] = {
4942 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4943 #ifdef CONFIG_BT_FEATURE_DEBUG
4944 EXP_FEAT(debug_uuid, set_debug_func),
4945 #endif
4946 EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
4947 EXP_FEAT(quality_report_uuid, set_quality_report_func),
4948 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
4949 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
4950 #ifdef CONFIG_BT_LE
4951 EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
4952 #endif
4953
4954 /* end with a null feature */
4955 EXP_FEAT(NULL, NULL)
4956 };
4957
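/* Dispatch Set Experimental Feature to the handler whose UUID matches
 * the request; the exp_features[] table above is NULL-terminated.
 */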
4958 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4959 void *data, u16 data_len)
4960 {
4961 struct mgmt_cp_set_exp_feature *cp = data;
4962 size_t i = 0;
4963
4964 bt_dev_dbg(hdev, "sock %p", sk);
4965
4966 for (i = 0; exp_features[i].uuid; i++) {
4967 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4968 return exp_features[i].set_func(sk, hdev, cp, data_len);
4969 }
4970
4971 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4972 MGMT_OP_SET_EXP_FEATURE,
4973 MGMT_STATUS_NOT_SUPPORTED);
4974 }
4975
4976 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4977 u16 data_len)
4978 {
4979 struct mgmt_cp_get_device_flags *cp = data;
4980 struct mgmt_rp_get_device_flags rp;
4981 struct bdaddr_list_with_flags *br_params;
4982 struct hci_conn_params *params;
4983 u32 supported_flags;
4984 u32 current_flags = 0;
4985 u8 status = MGMT_STATUS_INVALID_PARAMS;
4986
4987 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4988 &cp->addr.bdaddr, cp->addr.type);
4989
4990 hci_dev_lock(hdev);
4991
4992 supported_flags = hdev->conn_flags;
4993
4994 memset(&rp, 0, sizeof(rp));
4995
4996 if (cp->addr.type == BDADDR_BREDR) {
4997 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4998 &cp->addr.bdaddr,
4999 cp->addr.type);
5000 if (!br_params)
5001 goto done;
5002
5003 current_flags = br_params->flags;
5004 } else {
5005 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5006 le_addr_type(cp->addr.type));
5007 if (!params)
5008 goto done;
5009
5010 current_flags = params->flags;
5011 }
5012
5013 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5014 rp.addr.type = cp->addr.type;
5015 rp.supported_flags = cpu_to_le32(supported_flags);
5016 rp.current_flags = cpu_to_le32(current_flags);
5017
5018 status = MGMT_STATUS_SUCCESS;
5019
5020 done:
5021 hci_dev_unlock(hdev);
5022
5023 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5024 &rp, sizeof(rp));
5025 }
5026
5027 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5028 bdaddr_t *bdaddr, u8 bdaddr_type,
5029 u32 supported_flags, u32 current_flags)
5030 {
5031 struct mgmt_ev_device_flags_changed ev;
5032
5033 bacpy(&ev.addr.bdaddr, bdaddr);
5034 ev.addr.type = bdaddr_type;
5035 ev.supported_flags = cpu_to_le32(supported_flags);
5036 ev.current_flags = cpu_to_le32(current_flags);
5037
5038 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5039 }
5040
5041 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5042 u16 len)
5043 {
5044 struct mgmt_cp_set_device_flags *cp = data;
5045 struct bdaddr_list_with_flags *br_params;
5046 struct hci_conn_params *params;
5047 u8 status = MGMT_STATUS_INVALID_PARAMS;
5048 u32 supported_flags;
5049 u32 current_flags = __le32_to_cpu(cp->current_flags);
5050
5051 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5052 &cp->addr.bdaddr, cp->addr.type, current_flags);
5053
5054 	/* Note: hci_dev_lock() should arguably be taken earlier, since conn_flags can change; the flags are re-read under the lock below. */
5055 supported_flags = hdev->conn_flags;
5056
5057 if ((supported_flags | current_flags) != supported_flags) {
5058 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5059 current_flags, supported_flags);
5060 goto done;
5061 }
5062
5063 hci_dev_lock(hdev);
5064
5065 if (cp->addr.type == BDADDR_BREDR) {
5066 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5067 &cp->addr.bdaddr,
5068 cp->addr.type);
5069
5070 if (br_params) {
5071 br_params->flags = current_flags;
5072 status = MGMT_STATUS_SUCCESS;
5073 } else {
5074 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5075 &cp->addr.bdaddr, cp->addr.type);
5076 }
5077
5078 goto unlock;
5079 }
5080
5081 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5082 le_addr_type(cp->addr.type));
5083 if (!params) {
5084 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5085 &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5086 goto unlock;
5087 }
5088
5089 supported_flags = hdev->conn_flags;
5090
5091 if ((supported_flags | current_flags) != supported_flags) {
5092 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5093 current_flags, supported_flags);
5094 goto unlock;
5095 }
5096
5097 WRITE_ONCE(params->flags, current_flags);
5098 status = MGMT_STATUS_SUCCESS;
5099
5100 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5101 * has been set.
5102 */
5103 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5104 hci_update_passive_scan(hdev);
5105
5106 unlock:
5107 hci_dev_unlock(hdev);
5108
5109 done:
5110 if (status == MGMT_STATUS_SUCCESS)
5111 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5112 supported_flags, current_flags);
5113
5114 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5115 &cp->addr, sizeof(cp->addr));
5116 }
5117
5118 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5119 u16 handle)
5120 {
5121 struct mgmt_ev_adv_monitor_added ev;
5122
5123 ev.monitor_handle = cpu_to_le16(handle);
5124
5125 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5126 }
5127
5128 static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
5129 __le16 handle)
5130 {
5131 struct mgmt_ev_adv_monitor_removed ev;
5132
5133 ev.monitor_handle = handle;
5134
5135 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
5136 }
5137
5138 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5139 void *data, u16 len)
5140 {
5141 struct adv_monitor *monitor = NULL;
5142 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5143 int handle, err;
5144 size_t rp_size = 0;
5145 __u32 supported = 0;
5146 __u32 enabled = 0;
5147 __u16 num_handles = 0;
5148 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5149
5150 BT_DBG("request for %s", hdev->name);
5151
5152 hci_dev_lock(hdev);
5153
5154 if (msft_monitor_supported(hdev))
5155 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5156
5157 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5158 handles[num_handles++] = monitor->handle;
5159
5160 hci_dev_unlock(hdev);
5161
5162 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5163 rp = kmalloc(rp_size, GFP_KERNEL);
5164 if (!rp)
5165 return -ENOMEM;
5166
5167 /* All supported features are currently enabled */
5168 enabled = supported;
5169
5170 rp->supported_features = cpu_to_le32(supported);
5171 rp->enabled_features = cpu_to_le32(enabled);
5172 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5173 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5174 rp->num_handles = cpu_to_le16(num_handles);
5175 if (num_handles)
5176 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5177
5178 err = mgmt_cmd_complete(sk, hdev->id,
5179 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5180 MGMT_STATUS_SUCCESS, rp, rp_size);
5181
5182 kfree(rp);
5183
5184 return err;
5185 }
5186
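/* Completion of an Add Adv Patterns Monitor request: on success the
 * monitor is marked as registered and passive scanning is
 * re-evaluated so the new patterns take effect.
 */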
5187 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5188 void *data, int status)
5189 {
5190 struct mgmt_rp_add_adv_patterns_monitor rp;
5191 struct mgmt_pending_cmd *cmd = data;
5192 struct adv_monitor *monitor = cmd->user_data;
5193
5194 hci_dev_lock(hdev);
5195
5196 rp.monitor_handle = cpu_to_le16(monitor->handle);
5197
5198 if (!status) {
5199 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5200 hdev->adv_monitors_cnt++;
5201 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5202 monitor->state = ADV_MONITOR_STATE_REGISTERED;
5203 hci_update_passive_scan(hdev);
5204 }
5205
5206 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
5207 mgmt_status(status), &rp, sizeof(rp));
5208 mgmt_pending_remove(cmd);
5209
5210 hci_dev_unlock(hdev);
5211 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5212 rp.monitor_handle, status);
5213 }
5214
5215 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5216 {
5217 struct mgmt_pending_cmd *cmd = data;
5218 struct adv_monitor *monitor = cmd->user_data;
5219
5220 return hci_add_adv_monitor(hdev, monitor);
5221 }
5222
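/* Common tail for both monitor add variants: reject concurrent
 * monitor/LE operations, queue the registration on the cmd_sync
 * machinery and free the monitor on any early failure.
 */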
5223 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5224 struct adv_monitor *m, u8 status,
5225 void *data, u16 len, u16 op)
5226 {
5227 struct mgmt_pending_cmd *cmd;
5228 int err;
5229
5230 hci_dev_lock(hdev);
5231
5232 if (status)
5233 goto unlock;
5234
5235 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5236 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5237 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5238 status = MGMT_STATUS_BUSY;
5239 goto unlock;
5240 }
5241
5242 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5243 if (!cmd) {
5244 status = MGMT_STATUS_NO_RESOURCES;
5245 goto unlock;
5246 }
5247
5248 cmd->user_data = m;
5249 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5250 mgmt_add_adv_patterns_monitor_complete);
5251 if (err) {
5252 if (err == -ENOMEM)
5253 status = MGMT_STATUS_NO_RESOURCES;
5254 else
5255 status = MGMT_STATUS_FAILED;
5256
5257 goto unlock;
5258 }
5259
5260 hci_dev_unlock(hdev);
5261
5262 return 0;
5263
5264 unlock:
5265 hci_free_adv_monitor(hdev, m);
5266 hci_dev_unlock(hdev);
5267 return mgmt_cmd_status(sk, hdev->id, op, status);
5268 }
5269
5270 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5271 struct mgmt_adv_rssi_thresholds *rssi)
5272 {
5273 if (rssi) {
5274 m->rssi.low_threshold = rssi->low_threshold;
5275 m->rssi.low_threshold_timeout =
5276 __le16_to_cpu(rssi->low_threshold_timeout);
5277 m->rssi.high_threshold = rssi->high_threshold;
5278 m->rssi.high_threshold_timeout =
5279 __le16_to_cpu(rssi->high_threshold_timeout);
5280 m->rssi.sampling_period = rssi->sampling_period;
5281 } else {
5282 		/* Default values. These numbers are the least constraining
5283 		 * parameters for the MSFT API to work, so it behaves as if
5284 		 * there were no RSSI parameters to consider. May need to be
5285 		 * changed if other APIs are to be supported.
5286 */
5287 m->rssi.low_threshold = -127;
5288 m->rssi.low_threshold_timeout = 60;
5289 m->rssi.high_threshold = -127;
5290 m->rssi.high_threshold_timeout = 0;
5291 m->rssi.sampling_period = 0;
5292 }
5293 }
5294
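/* Validate and copy the caller supplied patterns; every offset/length
 * pair must fit within the maximum extended advertising data size.
 */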
5295 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5296 struct mgmt_adv_pattern *patterns)
5297 {
5298 u8 offset = 0, length = 0;
5299 struct adv_pattern *p = NULL;
5300 int i;
5301
5302 for (i = 0; i < pattern_count; i++) {
5303 offset = patterns[i].offset;
5304 length = patterns[i].length;
5305 if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5306 length > HCI_MAX_EXT_AD_LENGTH ||
5307 (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5308 return MGMT_STATUS_INVALID_PARAMS;
5309
5310 p = kmalloc(sizeof(*p), GFP_KERNEL);
5311 if (!p)
5312 return MGMT_STATUS_NO_RESOURCES;
5313
5314 p->ad_type = patterns[i].ad_type;
5315 p->offset = patterns[i].offset;
5316 p->length = patterns[i].length;
5317 memcpy(p->value, patterns[i].value, p->length);
5318
5319 INIT_LIST_HEAD(&p->list);
5320 list_add(&p->list, &m->patterns);
5321 }
5322
5323 return MGMT_STATUS_SUCCESS;
5324 }
5325
5326 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5327 void *data, u16 len)
5328 {
5329 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5330 struct adv_monitor *m = NULL;
5331 u8 status = MGMT_STATUS_SUCCESS;
5332 size_t expected_size = sizeof(*cp);
5333
5334 BT_DBG("request for %s", hdev->name);
5335
5336 if (len <= sizeof(*cp)) {
5337 status = MGMT_STATUS_INVALID_PARAMS;
5338 goto done;
5339 }
5340
5341 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5342 if (len != expected_size) {
5343 status = MGMT_STATUS_INVALID_PARAMS;
5344 goto done;
5345 }
5346
5347 m = kzalloc(sizeof(*m), GFP_KERNEL);
5348 if (!m) {
5349 status = MGMT_STATUS_NO_RESOURCES;
5350 goto done;
5351 }
5352
5353 INIT_LIST_HEAD(&m->patterns);
5354
5355 parse_adv_monitor_rssi(m, NULL);
5356 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5357
5358 done:
5359 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5360 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5361 }
5362
5363 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5364 void *data, u16 len)
5365 {
5366 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5367 struct adv_monitor *m = NULL;
5368 u8 status = MGMT_STATUS_SUCCESS;
5369 size_t expected_size = sizeof(*cp);
5370
5371 BT_DBG("request for %s", hdev->name);
5372
5373 if (len <= sizeof(*cp)) {
5374 status = MGMT_STATUS_INVALID_PARAMS;
5375 goto done;
5376 }
5377
5378 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5379 if (len != expected_size) {
5380 status = MGMT_STATUS_INVALID_PARAMS;
5381 goto done;
5382 }
5383
5384 m = kzalloc(sizeof(*m), GFP_KERNEL);
5385 if (!m) {
5386 status = MGMT_STATUS_NO_RESOURCES;
5387 goto done;
5388 }
5389
5390 INIT_LIST_HEAD(&m->patterns);
5391
5392 parse_adv_monitor_rssi(m, &cp->rssi);
5393 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5394
5395 done:
5396 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5397 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5398 }
5399
5400 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5401 void *data, int status)
5402 {
5403 struct mgmt_rp_remove_adv_monitor rp;
5404 struct mgmt_pending_cmd *cmd = data;
5405 struct mgmt_cp_remove_adv_monitor *cp;
5406
5407 if (status == -ECANCELED)
5408 return;
5409
5410 hci_dev_lock(hdev);
5411
5412 cp = cmd->param;
5413
5414 rp.monitor_handle = cp->monitor_handle;
5415
5416 if (!status) {
5417 mgmt_adv_monitor_removed(cmd->sk, hdev, cp->monitor_handle);
5418 hci_update_passive_scan(hdev);
5419 }
5420
5421 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
5422 mgmt_status(status), &rp, sizeof(rp));
5423 mgmt_pending_free(cmd);
5424
5425 hci_dev_unlock(hdev);
5426 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5427 rp.monitor_handle, status);
5428 }
5429
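/* Monitor handle 0 acts as a wildcard and removes all registered
 * monitors.
 */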
5430 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5431 {
5432 struct mgmt_pending_cmd *cmd = data;
5433 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5434 u16 handle = __le16_to_cpu(cp->monitor_handle);
5435
5436 if (!handle)
5437 return hci_remove_all_adv_monitor(hdev);
5438
5439 return hci_remove_single_adv_monitor(hdev, handle);
5440 }
5441
5442 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5443 void *data, u16 len)
5444 {
5445 struct mgmt_pending_cmd *cmd;
5446 int err, status;
5447
5448 hci_dev_lock(hdev);
5449
5450 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5451 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5452 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5453 status = MGMT_STATUS_BUSY;
5454 goto unlock;
5455 }
5456
5457 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5458 if (!cmd) {
5459 status = MGMT_STATUS_NO_RESOURCES;
5460 goto unlock;
5461 }
5462
5463 err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
5464 mgmt_remove_adv_monitor_complete);
5465
5466 if (err) {
5467 mgmt_pending_free(cmd);
5468
5469 if (err == -ENOMEM)
5470 status = MGMT_STATUS_NO_RESOURCES;
5471 else
5472 status = MGMT_STATUS_FAILED;
5473
5474 goto unlock;
5475 }
5476
5477 hci_dev_unlock(hdev);
5478
5479 return 0;
5480
5481 unlock:
5482 hci_dev_unlock(hdev);
5483 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5484 status);
5485 }
5486
5487 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5488 {
5489 struct mgmt_rp_read_local_oob_data mgmt_rp;
5490 size_t rp_size = sizeof(mgmt_rp);
5491 struct mgmt_pending_cmd *cmd = data;
5492 struct sk_buff *skb = cmd->skb;
5493 u8 status = mgmt_status(err);
5494
5495 if (!status) {
5496 if (!skb)
5497 status = MGMT_STATUS_FAILED;
5498 else if (IS_ERR(skb))
5499 status = mgmt_status(PTR_ERR(skb));
5500 else
5501 status = mgmt_status(skb->data[0]);
5502 }
5503
5504 bt_dev_dbg(hdev, "status %d", status);
5505
5506 if (status) {
5507 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5508 goto remove;
5509 }
5510
5511 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5512
5513 if (!bredr_sc_enabled(hdev)) {
5514 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5515
5516 if (skb->len < sizeof(*rp)) {
5517 mgmt_cmd_status(cmd->sk, hdev->id,
5518 MGMT_OP_READ_LOCAL_OOB_DATA,
5519 MGMT_STATUS_FAILED);
5520 goto remove;
5521 }
5522
5523 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5524 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5525
5526 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5527 } else {
5528 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5529
5530 if (skb->len < sizeof(*rp)) {
5531 mgmt_cmd_status(cmd->sk, hdev->id,
5532 MGMT_OP_READ_LOCAL_OOB_DATA,
5533 MGMT_STATUS_FAILED);
5534 goto remove;
5535 }
5536
5537 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5538 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5539
5540 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5541 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5542 }
5543
5544 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5545 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5546
5547 remove:
5548 if (skb && !IS_ERR(skb))
5549 kfree_skb(skb);
5550
5551 mgmt_pending_free(cmd);
5552 }
5553
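/* Read local OOB data from the controller; with BR/EDR Secure
 * Connections enabled the extended variant is used so that both
 * P-192 and P-256 values are returned.
 */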
5554 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5555 {
5556 struct mgmt_pending_cmd *cmd = data;
5557
5558 if (bredr_sc_enabled(hdev))
5559 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5560 else
5561 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5562
5563 if (IS_ERR(cmd->skb))
5564 return PTR_ERR(cmd->skb);
5565 else
5566 return 0;
5567 }
5568
5569 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5570 void *data, u16 data_len)
5571 {
5572 struct mgmt_pending_cmd *cmd;
5573 int err;
5574
5575 bt_dev_dbg(hdev, "sock %p", sk);
5576
5577 hci_dev_lock(hdev);
5578
5579 if (!hdev_is_powered(hdev)) {
5580 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5581 MGMT_STATUS_NOT_POWERED);
5582 goto unlock;
5583 }
5584
5585 if (!lmp_ssp_capable(hdev)) {
5586 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5587 MGMT_STATUS_NOT_SUPPORTED);
5588 goto unlock;
5589 }
5590
5591 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5592 if (!cmd)
5593 err = -ENOMEM;
5594 else
5595 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5596 read_local_oob_data_complete);
5597
5598 if (err < 0) {
5599 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5600 MGMT_STATUS_FAILED);
5601
5602 if (cmd)
5603 mgmt_pending_free(cmd);
5604 }
5605
5606 unlock:
5607 hci_dev_unlock(hdev);
5608 return err;
5609 }
5610
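/* Two request sizes are accepted: the legacy form carries only the
 * P-192 hash/randomizer, while the extended form carries both P-192
 * and P-256 values; zero-filled values disable the respective pair.
 */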
5611 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5612 void *data, u16 len)
5613 {
5614 struct mgmt_addr_info *addr = data;
5615 int err;
5616
5617 bt_dev_dbg(hdev, "sock %p", sk);
5618
5619 if (!bdaddr_type_is_valid(addr->type))
5620 return mgmt_cmd_complete(sk, hdev->id,
5621 MGMT_OP_ADD_REMOTE_OOB_DATA,
5622 MGMT_STATUS_INVALID_PARAMS,
5623 addr, sizeof(*addr));
5624
5625 hci_dev_lock(hdev);
5626
5627 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5628 struct mgmt_cp_add_remote_oob_data *cp = data;
5629 u8 status;
5630
5631 if (cp->addr.type != BDADDR_BREDR) {
5632 err = mgmt_cmd_complete(sk, hdev->id,
5633 MGMT_OP_ADD_REMOTE_OOB_DATA,
5634 MGMT_STATUS_INVALID_PARAMS,
5635 &cp->addr, sizeof(cp->addr));
5636 goto unlock;
5637 }
5638
5639 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5640 cp->addr.type, cp->hash,
5641 cp->rand, NULL, NULL);
5642 if (err < 0)
5643 status = MGMT_STATUS_FAILED;
5644 else
5645 status = MGMT_STATUS_SUCCESS;
5646
5647 err = mgmt_cmd_complete(sk, hdev->id,
5648 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5649 &cp->addr, sizeof(cp->addr));
5650 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5651 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5652 u8 *rand192, *hash192, *rand256, *hash256;
5653 u8 status;
5654
5655 if (bdaddr_type_is_le(cp->addr.type)) {
5656 /* Enforce zero-valued 192-bit parameters as
5657 * long as legacy SMP OOB isn't implemented.
5658 */
5659 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5660 memcmp(cp->hash192, ZERO_KEY, 16)) {
5661 err = mgmt_cmd_complete(sk, hdev->id,
5662 MGMT_OP_ADD_REMOTE_OOB_DATA,
5663 MGMT_STATUS_INVALID_PARAMS,
5664 addr, sizeof(*addr));
5665 goto unlock;
5666 }
5667
5668 rand192 = NULL;
5669 hash192 = NULL;
5670 } else {
5671 /* In case one of the P-192 values is set to zero,
5672 * then just disable OOB data for P-192.
5673 */
5674 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5675 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5676 rand192 = NULL;
5677 hash192 = NULL;
5678 } else {
5679 rand192 = cp->rand192;
5680 hash192 = cp->hash192;
5681 }
5682 }
5683
5684 /* In case one of the P-256 values is set to zero, then just
5685 * disable OOB data for P-256.
5686 */
5687 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5688 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5689 rand256 = NULL;
5690 hash256 = NULL;
5691 } else {
5692 rand256 = cp->rand256;
5693 hash256 = cp->hash256;
5694 }
5695
5696 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5697 cp->addr.type, hash192, rand192,
5698 hash256, rand256);
5699 if (err < 0)
5700 status = MGMT_STATUS_FAILED;
5701 else
5702 status = MGMT_STATUS_SUCCESS;
5703
5704 err = mgmt_cmd_complete(sk, hdev->id,
5705 MGMT_OP_ADD_REMOTE_OOB_DATA,
5706 status, &cp->addr, sizeof(cp->addr));
5707 } else {
5708 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5709 len);
5710 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5711 MGMT_STATUS_INVALID_PARAMS);
5712 }
5713
5714 unlock:
5715 hci_dev_unlock(hdev);
5716 return err;
5717 }
5718
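/* Remove stored OOB data for a single BR/EDR address, or clear all
 * stored entries when BDADDR_ANY is given.
 */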
5719 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5720 void *data, u16 len)
5721 {
5722 struct mgmt_cp_remove_remote_oob_data *cp = data;
5723 u8 status;
5724 int err;
5725
5726 bt_dev_dbg(hdev, "sock %p", sk);
5727
5728 if (cp->addr.type != BDADDR_BREDR)
5729 return mgmt_cmd_complete(sk, hdev->id,
5730 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5731 MGMT_STATUS_INVALID_PARAMS,
5732 &cp->addr, sizeof(cp->addr));
5733
5734 hci_dev_lock(hdev);
5735
5736 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5737 hci_remote_oob_data_clear(hdev);
5738 status = MGMT_STATUS_SUCCESS;
5739 goto done;
5740 }
5741
5742 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5743 if (err < 0)
5744 status = MGMT_STATUS_INVALID_PARAMS;
5745 else
5746 status = MGMT_STATUS_SUCCESS;
5747
5748 done:
5749 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5750 status, &cp->addr, sizeof(cp->addr));
5751
5752 hci_dev_unlock(hdev);
5753 return err;
5754 }
5755
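/* Validate the requested discovery type against the controller's
 * supported transports; on failure *mgmt_status is set to the status
 * code that should be returned to user space.
 */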
5756 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5757 uint8_t *mgmt_status)
5758 {
5759 switch (type) {
5760 case DISCOV_TYPE_LE:
5761 *mgmt_status = mgmt_le_support(hdev);
5762 if (*mgmt_status)
5763 return false;
5764 break;
5765 case DISCOV_TYPE_INTERLEAVED:
5766 *mgmt_status = mgmt_le_support(hdev);
5767 if (*mgmt_status)
5768 return false;
5769 fallthrough;
5770 case DISCOV_TYPE_BREDR:
5771 *mgmt_status = mgmt_bredr_support(hdev);
5772 if (*mgmt_status)
5773 return false;
5774 break;
5775 default:
5776 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5777 return false;
5778 }
5779
5780 return true;
5781 }
5782
5783 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5784 {
5785 struct mgmt_pending_cmd *cmd = data;
5786
5787 bt_dev_dbg(hdev, "err %d", err);
5788
5789 if (err == -ECANCELED)
5790 return;
5791
5792 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5793 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5794 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5795 return;
5796
5797 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
5798 cmd->param, 1);
5799 mgmt_pending_remove(cmd);
5800
5801 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
5802 DISCOVERY_FINDING);
5803 }
5804
5805 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5806 {
5807 return hci_start_discovery_sync(hdev);
5808 }
5809
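/* Common handler for Start Discovery and Start Limited Discovery;
 * the opcode only affects the reply and the discovery.limited flag.
 */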
5810 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5811 u16 op, void *data, u16 len)
5812 {
5813 struct mgmt_cp_start_discovery *cp = data;
5814 struct mgmt_pending_cmd *cmd;
5815 u8 status;
5816 int err;
5817
5818 bt_dev_dbg(hdev, "sock %p", sk);
5819
5820 hci_dev_lock(hdev);
5821
5822 if (!hdev_is_powered(hdev)) {
5823 err = mgmt_cmd_complete(sk, hdev->id, op,
5824 MGMT_STATUS_NOT_POWERED,
5825 &cp->type, sizeof(cp->type));
5826 goto failed;
5827 }
5828
5829 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5830 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5831 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5832 &cp->type, sizeof(cp->type));
5833 goto failed;
5834 }
5835
5836 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5837 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5838 &cp->type, sizeof(cp->type));
5839 goto failed;
5840 }
5841
5842 /* Can't start discovery when it is paused */
5843 if (hdev->discovery_paused) {
5844 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5845 &cp->type, sizeof(cp->type));
5846 goto failed;
5847 }
5848
5849 /* Clear the discovery filter first to free any previously
5850 * allocated memory for the UUID list.
5851 */
5852 hci_discovery_filter_clear(hdev);
5853
5854 hdev->discovery.type = cp->type;
5855 hdev->discovery.report_invalid_rssi = false;
5856 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5857 hdev->discovery.limited = true;
5858 else
5859 hdev->discovery.limited = false;
5860
5861 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5862 if (!cmd) {
5863 err = -ENOMEM;
5864 goto failed;
5865 }
5866
5867 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5868 start_discovery_complete);
5869 if (err < 0) {
5870 mgmt_pending_remove(cmd);
5871 goto failed;
5872 }
5873
5874 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5875
5876 failed:
5877 hci_dev_unlock(hdev);
5878 return err;
5879 }
5880
5881 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5882 void *data, u16 len)
5883 {
5884 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5885 data, len);
5886 }
5887
5888 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5889 void *data, u16 len)
5890 {
5891 return start_discovery_internal(sk, hdev,
5892 MGMT_OP_START_LIMITED_DISCOVERY,
5893 data, len);
5894 }
5895
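/* Like start_discovery_internal() but additionally validates the
 * UUID list length and installs the RSSI threshold and UUID filter
 * before queuing the discovery.
 */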
5896 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5897 void *data, u16 len)
5898 {
5899 struct mgmt_cp_start_service_discovery *cp = data;
5900 struct mgmt_pending_cmd *cmd;
5901 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5902 u16 uuid_count, expected_len;
5903 u8 status;
5904 int err;
5905
5906 bt_dev_dbg(hdev, "sock %p", sk);
5907
5908 hci_dev_lock(hdev);
5909
5910 if (!hdev_is_powered(hdev)) {
5911 err = mgmt_cmd_complete(sk, hdev->id,
5912 MGMT_OP_START_SERVICE_DISCOVERY,
5913 MGMT_STATUS_NOT_POWERED,
5914 &cp->type, sizeof(cp->type));
5915 goto failed;
5916 }
5917
5918 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5919 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5920 err = mgmt_cmd_complete(sk, hdev->id,
5921 MGMT_OP_START_SERVICE_DISCOVERY,
5922 MGMT_STATUS_BUSY, &cp->type,
5923 sizeof(cp->type));
5924 goto failed;
5925 }
5926
5927 if (hdev->discovery_paused) {
5928 err = mgmt_cmd_complete(sk, hdev->id,
5929 MGMT_OP_START_SERVICE_DISCOVERY,
5930 MGMT_STATUS_BUSY, &cp->type,
5931 sizeof(cp->type));
5932 goto failed;
5933 }
5934
5935 uuid_count = __le16_to_cpu(cp->uuid_count);
5936 if (uuid_count > max_uuid_count) {
5937 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5938 uuid_count);
5939 err = mgmt_cmd_complete(sk, hdev->id,
5940 MGMT_OP_START_SERVICE_DISCOVERY,
5941 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5942 sizeof(cp->type));
5943 goto failed;
5944 }
5945
5946 expected_len = sizeof(*cp) + uuid_count * 16;
5947 if (expected_len != len) {
5948 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5949 expected_len, len);
5950 err = mgmt_cmd_complete(sk, hdev->id,
5951 MGMT_OP_START_SERVICE_DISCOVERY,
5952 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5953 sizeof(cp->type));
5954 goto failed;
5955 }
5956
5957 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5958 err = mgmt_cmd_complete(sk, hdev->id,
5959 MGMT_OP_START_SERVICE_DISCOVERY,
5960 status, &cp->type, sizeof(cp->type));
5961 goto failed;
5962 }
5963
5964 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
5965 hdev, data, len);
5966 if (!cmd) {
5967 err = -ENOMEM;
5968 goto failed;
5969 }
5970
5971 /* Clear the discovery filter first to free any previously
5972 * allocated memory for the UUID list.
5973 */
5974 hci_discovery_filter_clear(hdev);
5975
5976 hdev->discovery.result_filtering = true;
5977 hdev->discovery.type = cp->type;
5978 hdev->discovery.rssi = cp->rssi;
5979 hdev->discovery.uuid_count = uuid_count;
5980
5981 if (uuid_count > 0) {
5982 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
5983 GFP_KERNEL);
5984 if (!hdev->discovery.uuids) {
5985 err = mgmt_cmd_complete(sk, hdev->id,
5986 MGMT_OP_START_SERVICE_DISCOVERY,
5987 MGMT_STATUS_FAILED,
5988 &cp->type, sizeof(cp->type));
5989 mgmt_pending_remove(cmd);
5990 goto failed;
5991 }
5992 }
5993
5994 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5995 start_discovery_complete);
5996 if (err < 0) {
5997 mgmt_pending_remove(cmd);
5998 goto failed;
5999 }
6000
6001 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6002
6003 failed:
6004 hci_dev_unlock(hdev);
6005 return err;
6006 }
6007
6008 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6009 {
6010 struct mgmt_pending_cmd *cmd = data;
6011
6012 if (err == -ECANCELED ||
6013 cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6014 return;
6015
6016 bt_dev_dbg(hdev, "err %d", err);
6017
6018 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
6019 cmd->param, 1);
6020 mgmt_pending_remove(cmd);
6021
6022 if (!err)
6023 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6024 }
6025
6026 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6027 {
6028 return hci_stop_discovery_sync(hdev);
6029 }
6030
6031 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6032 u16 len)
6033 {
6034 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6035 struct mgmt_pending_cmd *cmd;
6036 int err;
6037
6038 bt_dev_dbg(hdev, "sock %p", sk);
6039
6040 hci_dev_lock(hdev);
6041
6042 if (!hci_discovery_active(hdev)) {
6043 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6044 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6045 sizeof(mgmt_cp->type));
6046 goto unlock;
6047 }
6048
6049 if (hdev->discovery.type != mgmt_cp->type) {
6050 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6051 MGMT_STATUS_INVALID_PARAMS,
6052 &mgmt_cp->type, sizeof(mgmt_cp->type));
6053 goto unlock;
6054 }
6055
6056 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6057 if (!cmd) {
6058 err = -ENOMEM;
6059 goto unlock;
6060 }
6061
6062 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6063 stop_discovery_complete);
6064 if (err < 0) {
6065 mgmt_pending_remove(cmd);
6066 goto unlock;
6067 }
6068
6069 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6070
6071 unlock:
6072 hci_dev_unlock(hdev);
6073 return err;
6074 }
6075
6076 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6077 u16 len)
6078 {
6079 struct mgmt_cp_confirm_name *cp = data;
6080 struct inquiry_entry *e;
6081 int err;
6082
6083 bt_dev_dbg(hdev, "sock %p", sk);
6084
6085 hci_dev_lock(hdev);
6086
6087 if (!hci_discovery_active(hdev)) {
6088 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6089 MGMT_STATUS_FAILED, &cp->addr,
6090 sizeof(cp->addr));
6091 goto failed;
6092 }
6093
6094 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6095 if (!e) {
6096 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6097 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6098 sizeof(cp->addr));
6099 goto failed;
6100 }
6101
6102 if (cp->name_known) {
6103 e->name_state = NAME_KNOWN;
6104 list_del(&e->list);
6105 } else {
6106 e->name_state = NAME_NEEDED;
6107 hci_inquiry_cache_update_resolve(hdev, e);
6108 }
6109
6110 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6111 &cp->addr, sizeof(cp->addr));
6112
6113 failed:
6114 hci_dev_unlock(hdev);
6115 return err;
6116 }
6117
6118 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6119 u16 len)
6120 {
6121 struct mgmt_cp_block_device *cp = data;
6122 u8 status;
6123 int err;
6124
6125 bt_dev_dbg(hdev, "sock %p", sk);
6126
6127 if (!bdaddr_type_is_valid(cp->addr.type))
6128 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6129 MGMT_STATUS_INVALID_PARAMS,
6130 &cp->addr, sizeof(cp->addr));
6131
6132 hci_dev_lock(hdev);
6133
6134 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6135 cp->addr.type);
6136 if (err < 0) {
6137 status = MGMT_STATUS_FAILED;
6138 goto done;
6139 }
6140
6141 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6142 sk);
6143 status = MGMT_STATUS_SUCCESS;
6144
6145 done:
6146 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6147 &cp->addr, sizeof(cp->addr));
6148
6149 hci_dev_unlock(hdev);
6150
6151 return err;
6152 }
6153
6154 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6155 u16 len)
6156 {
6157 struct mgmt_cp_unblock_device *cp = data;
6158 u8 status;
6159 int err;
6160
6161 bt_dev_dbg(hdev, "sock %p", sk);
6162
6163 if (!bdaddr_type_is_valid(cp->addr.type))
6164 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6165 MGMT_STATUS_INVALID_PARAMS,
6166 &cp->addr, sizeof(cp->addr));
6167
6168 hci_dev_lock(hdev);
6169
6170 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6171 cp->addr.type);
6172 if (err < 0) {
6173 status = MGMT_STATUS_INVALID_PARAMS;
6174 goto done;
6175 }
6176
6177 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6178 sk);
6179 status = MGMT_STATUS_SUCCESS;
6180
6181 done:
6182 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6183 &cp->addr, sizeof(cp->addr));
6184
6185 hci_dev_unlock(hdev);
6186
6187 return err;
6188 }
6189
6190 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6191 {
6192 return hci_update_eir_sync(hdev);
6193 }
6194
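/* Valid Device ID sources are 0x0000 (disabled), 0x0001 (Bluetooth
 * SIG) and 0x0002 (USB Implementer's Forum); larger values are
 * rejected below.
 */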
6195 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6196 u16 len)
6197 {
6198 struct mgmt_cp_set_device_id *cp = data;
6199 int err;
6200 __u16 source;
6201
6202 bt_dev_dbg(hdev, "sock %p", sk);
6203
6204 source = __le16_to_cpu(cp->source);
6205
6206 if (source > 0x0002)
6207 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6208 MGMT_STATUS_INVALID_PARAMS);
6209
6210 hci_dev_lock(hdev);
6211
6212 hdev->devid_source = source;
6213 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6214 hdev->devid_product = __le16_to_cpu(cp->product);
6215 hdev->devid_version = __le16_to_cpu(cp->version);
6216
6217 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6218 NULL, 0);
6219
6220 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6221
6222 hci_dev_unlock(hdev);
6223
6224 return err;
6225 }
6226
6227 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6228 {
6229 if (err)
6230 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6231 else
6232 bt_dev_dbg(hdev, "status %d", err);
6233 }
6234
6235 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6236 {
6237 struct cmd_lookup match = { NULL, hdev };
6238 u8 instance;
6239 struct adv_info *adv_instance;
6240 u8 status = mgmt_status(err);
6241
6242 if (status) {
6243 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, true,
6244 cmd_status_rsp, &status);
6245 return;
6246 }
6247
6248 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6249 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6250 else
6251 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6252
6253 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, true, settings_rsp,
6254 &match);
6255
6256 new_settings(hdev, match.sk);
6257
6258 if (match.sk)
6259 sock_put(match.sk);
6260
6261 /* If "Set Advertising" was just disabled and instance advertising was
6262 * set up earlier, then re-enable multi-instance advertising.
6263 */
6264 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6265 list_empty(&hdev->adv_instances))
6266 return;
6267
6268 instance = hdev->cur_adv_instance;
6269 if (!instance) {
6270 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6271 struct adv_info, list);
6272 if (!adv_instance)
6273 return;
6274
6275 instance = adv_instance->instance;
6276 }
6277
6278 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6279
6280 enable_advertising_instance(hdev, err);
6281 }
6282
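/* Runs on the cmd_sync queue: update the connectable flag and either
 * (re)start advertising on instance 0 or disable advertising,
 * depending on cp->val.
 */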
6283 static int set_adv_sync(struct hci_dev *hdev, void *data)
6284 {
6285 struct mgmt_pending_cmd *cmd = data;
6286 struct mgmt_mode *cp = cmd->param;
6287 u8 val = !!cp->val;
6288
6289 if (cp->val == 0x02)
6290 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6291 else
6292 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6293
6294 cancel_adv_timeout(hdev);
6295
6296 if (val) {
6297 /* Switch to instance "0" for the Set Advertising setting.
6298 * We cannot use update_[adv|scan_rsp]_data() here as the
6299 * HCI_ADVERTISING flag is not yet set.
6300 */
6301 hdev->cur_adv_instance = 0x00;
6302
6303 if (ext_adv_capable(hdev)) {
6304 hci_start_ext_adv_sync(hdev, 0x00);
6305 } else {
6306 hci_update_adv_data_sync(hdev, 0x00);
6307 hci_update_scan_rsp_data_sync(hdev, 0x00);
6308 hci_enable_advertising_sync(hdev);
6309 }
6310 } else {
6311 hci_disable_advertising_sync(hdev);
6312 }
6313
6314 return 0;
6315 }
6316
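/* Set Advertising accepts 0x00 (off), 0x01 (on) and 0x02 (on and
 * connectable), matching the cp->val checks below.
 */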
6317 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6318 u16 len)
6319 {
6320 struct mgmt_mode *cp = data;
6321 struct mgmt_pending_cmd *cmd;
6322 u8 val, status;
6323 int err;
6324
6325 bt_dev_dbg(hdev, "sock %p", sk);
6326
6327 status = mgmt_le_support(hdev);
6328 if (status)
6329 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6330 status);
6331
6332 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6333 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6334 MGMT_STATUS_INVALID_PARAMS);
6335
6336 if (hdev->advertising_paused)
6337 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6338 MGMT_STATUS_BUSY);
6339
6340 hci_dev_lock(hdev);
6341
6342 val = !!cp->val;
6343
6344 /* The following conditions are ones which mean that we should
6345 * not do any HCI communication but directly send a mgmt
6346 * response to user space (after toggling the flag if
6347 * necessary).
6348 */
6349 if (!hdev_is_powered(hdev) ||
6350 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6351 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6352 hci_dev_test_flag(hdev, HCI_MESH) ||
6353 hci_conn_num(hdev, LE_LINK) > 0 ||
6354 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6355 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6356 bool changed;
6357
6358 if (cp->val) {
6359 hdev->cur_adv_instance = 0x00;
6360 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6361 if (cp->val == 0x02)
6362 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6363 else
6364 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6365 } else {
6366 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6367 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6368 }
6369
6370 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6371 if (err < 0)
6372 goto unlock;
6373
6374 if (changed)
6375 err = new_settings(hdev, sk);
6376
6377 goto unlock;
6378 }
6379
6380 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6381 pending_find(MGMT_OP_SET_LE, hdev)) {
6382 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6383 MGMT_STATUS_BUSY);
6384 goto unlock;
6385 }
6386
6387 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6388 if (!cmd)
6389 err = -ENOMEM;
6390 else
6391 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6392 set_advertising_complete);
6393
6394 if (err < 0 && cmd)
6395 mgmt_pending_remove(cmd);
6396
6397 unlock:
6398 hci_dev_unlock(hdev);
6399 return err;
6400 }
6401
6402 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6403 void *data, u16 len)
6404 {
6405 struct mgmt_cp_set_static_address *cp = data;
6406 int err;
6407
6408 bt_dev_dbg(hdev, "sock %p", sk);
6409
6410 if (!lmp_le_capable(hdev))
6411 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6412 MGMT_STATUS_NOT_SUPPORTED);
6413
6414 if (hdev_is_powered(hdev))
6415 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6416 MGMT_STATUS_REJECTED);
6417
6418 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6419 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6420 return mgmt_cmd_status(sk, hdev->id,
6421 MGMT_OP_SET_STATIC_ADDRESS,
6422 MGMT_STATUS_INVALID_PARAMS);
6423
6424 /* Two most significant bits shall be set */
6425 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6426 return mgmt_cmd_status(sk, hdev->id,
6427 MGMT_OP_SET_STATIC_ADDRESS,
6428 MGMT_STATUS_INVALID_PARAMS);
6429 }
6430
6431 hci_dev_lock(hdev);
6432
6433 bacpy(&hdev->static_addr, &cp->bdaddr);
6434
6435 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6436 if (err < 0)
6437 goto unlock;
6438
6439 err = new_settings(hdev, sk);
6440
6441 unlock:
6442 hci_dev_unlock(hdev);
6443 return err;
6444 }
6445
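/* Interval and window are in units of 0.625 ms, so the allowed range
 * of 0x0004-0x4000 corresponds to 2.5 ms - 10.24 s; the window must
 * never be larger than the interval.
 */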
6446 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6447 void *data, u16 len)
6448 {
6449 struct mgmt_cp_set_scan_params *cp = data;
6450 __u16 interval, window;
6451 int err;
6452
6453 bt_dev_dbg(hdev, "sock %p", sk);
6454
6455 if (!lmp_le_capable(hdev))
6456 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6457 MGMT_STATUS_NOT_SUPPORTED);
6458
6459 /* Keep allowed ranges in sync with set_mesh() */
6460 interval = __le16_to_cpu(cp->interval);
6461
6462 if (interval < 0x0004 || interval > 0x4000)
6463 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6464 MGMT_STATUS_INVALID_PARAMS);
6465
6466 window = __le16_to_cpu(cp->window);
6467
6468 if (window < 0x0004 || window > 0x4000)
6469 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6470 MGMT_STATUS_INVALID_PARAMS);
6471
6472 if (window > interval)
6473 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6474 MGMT_STATUS_INVALID_PARAMS);
6475
6476 hci_dev_lock(hdev);
6477
6478 hdev->le_scan_interval = interval;
6479 hdev->le_scan_window = window;
6480
6481 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6482 NULL, 0);
6483
6484 /* If background scan is running, restart it so new parameters are
6485 * loaded.
6486 */
6487 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6488 hdev->discovery.state == DISCOVERY_STOPPED)
6489 hci_update_passive_scan(hdev);
6490
6491 hci_dev_unlock(hdev);
6492
6493 return err;
6494 }
6495
6496 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6497 {
6498 struct mgmt_pending_cmd *cmd = data;
6499
6500 bt_dev_dbg(hdev, "err %d", err);
6501
6502 if (err) {
6503 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6504 mgmt_status(err));
6505 } else {
6506 struct mgmt_mode *cp = cmd->param;
6507
6508 if (cp->val)
6509 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6510 else
6511 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6512
6513 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6514 new_settings(hdev, cmd->sk);
6515 }
6516
6517 mgmt_pending_free(cmd);
6518 }
6519
6520 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6521 {
6522 struct mgmt_pending_cmd *cmd = data;
6523 struct mgmt_mode *cp = cmd->param;
6524
6525 return hci_write_fast_connectable_sync(hdev, cp->val);
6526 }
6527
6528 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6529 void *data, u16 len)
6530 {
6531 struct mgmt_mode *cp = data;
6532 struct mgmt_pending_cmd *cmd;
6533 int err;
6534
6535 bt_dev_dbg(hdev, "sock %p", sk);
6536
6537 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6538 hdev->hci_ver < BLUETOOTH_VER_1_2)
6539 return mgmt_cmd_status(sk, hdev->id,
6540 MGMT_OP_SET_FAST_CONNECTABLE,
6541 MGMT_STATUS_NOT_SUPPORTED);
6542
6543 if (cp->val != 0x00 && cp->val != 0x01)
6544 return mgmt_cmd_status(sk, hdev->id,
6545 MGMT_OP_SET_FAST_CONNECTABLE,
6546 MGMT_STATUS_INVALID_PARAMS);
6547
6548 hci_dev_lock(hdev);
6549
6550 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6551 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6552 goto unlock;
6553 }
6554
6555 if (!hdev_is_powered(hdev)) {
6556 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6557 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6558 new_settings(hdev, sk);
6559 goto unlock;
6560 }
6561
6562 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6563 len);
6564 if (!cmd)
6565 err = -ENOMEM;
6566 else
6567 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6568 fast_connectable_complete);
6569
6570 if (err < 0) {
6571 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6572 MGMT_STATUS_FAILED);
6573
6574 if (cmd)
6575 mgmt_pending_free(cmd);
6576 }
6577
6578 unlock:
6579 hci_dev_unlock(hdev);
6580
6581 return err;
6582 }
6583
6584 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6585 {
6586 struct mgmt_pending_cmd *cmd = data;
6587
6588 bt_dev_dbg(hdev, "err %d", err);
6589
6590 if (err) {
6591 u8 mgmt_err = mgmt_status(err);
6592
6593 /* We need to restore the flag if related HCI commands
6594 * failed.
6595 */
6596 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6597
6598 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
6599 } else {
6600 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6601 new_settings(hdev, cmd->sk);
6602 }
6603
6604 mgmt_pending_free(cmd);
6605 }
6606
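/* Runs on the cmd_sync queue: with BR/EDR toggled, refresh fast
 * connectable, page scan and the advertising data flags.
 */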
6607 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6608 {
6609 int status;
6610
6611 status = hci_write_fast_connectable_sync(hdev, false);
6612
6613 if (!status)
6614 status = hci_update_scan_sync(hdev);
6615
6616 /* Since only the advertising data flags will change, there
6617 * is no need to update the scan response data.
6618 */
6619 if (!status)
6620 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6621
6622 return status;
6623 }
6624
6625 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6626 {
6627 struct mgmt_mode *cp = data;
6628 struct mgmt_pending_cmd *cmd;
6629 int err;
6630
6631 bt_dev_dbg(hdev, "sock %p", sk);
6632
6633 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6634 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6635 MGMT_STATUS_NOT_SUPPORTED);
6636
6637 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6638 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6639 MGMT_STATUS_REJECTED);
6640
6641 if (cp->val != 0x00 && cp->val != 0x01)
6642 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6643 MGMT_STATUS_INVALID_PARAMS);
6644
6645 hci_dev_lock(hdev);
6646
6647 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6648 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6649 goto unlock;
6650 }
6651
6652 if (!hdev_is_powered(hdev)) {
6653 if (!cp->val) {
6654 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6655 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6656 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6657 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6658 }
6659
6660 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6661
6662 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6663 if (err < 0)
6664 goto unlock;
6665
6666 err = new_settings(hdev, sk);
6667 goto unlock;
6668 }
6669
6670 /* Reject disabling when powered on */
6671 if (!cp->val) {
6672 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6673 MGMT_STATUS_REJECTED);
6674 goto unlock;
6675 } else {
6676 /* When configuring a dual-mode controller to operate
6677 * with LE only and using a static address, then switching
6678 * BR/EDR back on is not allowed.
6679 *
6680 * Dual-mode controllers shall operate with the public
6681 * address as their identity address for BR/EDR and LE. So
6682 * reject the attempt to create an invalid configuration.
6683 *
6684 * The same restriction applies when secure connections
6685 * have been enabled. For BR/EDR this is a controller feature
6686 * while for LE it is a host stack feature. This means that
6687 * switching BR/EDR back on when secure connections have been
6688 * enabled is not a supported transaction.
6689 */
6690 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6691 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6692 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6693 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6694 MGMT_STATUS_REJECTED);
6695 goto unlock;
6696 }
6697 }
6698
6699 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6700 if (!cmd)
6701 err = -ENOMEM;
6702 else
6703 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6704 set_bredr_complete);
6705
6706 if (err < 0) {
6707 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6708 MGMT_STATUS_FAILED);
6709 if (cmd)
6710 mgmt_pending_free(cmd);
6711
6712 goto unlock;
6713 }
6714
6715 /* We need to flip the bit already here so that
6716 * hci_req_update_adv_data generates the correct flags.
6717 */
6718 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6719
6720 unlock:
6721 hci_dev_unlock(hdev);
6722 return err;
6723 }
6724
6725 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6726 {
6727 struct mgmt_pending_cmd *cmd = data;
6728 struct mgmt_mode *cp;
6729
6730 bt_dev_dbg(hdev, "err %d", err);
6731
6732 if (err) {
6733 u8 mgmt_err = mgmt_status(err);
6734
6735 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
6736 goto done;
6737 }
6738
6739 cp = cmd->param;
6740
6741 switch (cp->val) {
6742 case 0x00:
6743 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6744 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6745 break;
6746 case 0x01:
6747 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6748 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6749 break;
6750 case 0x02:
6751 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6752 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6753 break;
6754 }
6755
6756 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6757 new_settings(hdev, cmd->sk);
6758
6759 done:
6760 mgmt_pending_free(cmd);
6761 }
6762
6763 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6764 {
6765 struct mgmt_pending_cmd *cmd = data;
6766 struct mgmt_mode *cp = cmd->param;
6767 u8 val = !!cp->val;
6768
6769 /* Force write of val */
6770 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6771
6772 return hci_write_sc_support_sync(hdev, val);
6773 }
6774
6775 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6776 void *data, u16 len)
6777 {
6778 struct mgmt_mode *cp = data;
6779 struct mgmt_pending_cmd *cmd;
6780 u8 val;
6781 int err;
6782
6783 bt_dev_dbg(hdev, "sock %p", sk);
6784
6785 if (!lmp_sc_capable(hdev) &&
6786 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6787 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6788 MGMT_STATUS_NOT_SUPPORTED);
6789
6790 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6791 lmp_sc_capable(hdev) &&
6792 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6793 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6794 MGMT_STATUS_REJECTED);
6795
6796 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6797 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6798 MGMT_STATUS_INVALID_PARAMS);
6799
6800 hci_dev_lock(hdev);
6801
6802 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6803 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6804 bool changed;
6805
6806 if (cp->val) {
6807 changed = !hci_dev_test_and_set_flag(hdev,
6808 HCI_SC_ENABLED);
6809 if (cp->val == 0x02)
6810 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6811 else
6812 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6813 } else {
6814 changed = hci_dev_test_and_clear_flag(hdev,
6815 HCI_SC_ENABLED);
6816 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6817 }
6818
6819 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6820 if (err < 0)
6821 goto failed;
6822
6823 if (changed)
6824 err = new_settings(hdev, sk);
6825
6826 goto failed;
6827 }
6828
6829 val = !!cp->val;
6830
6831 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6832 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6833 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6834 goto failed;
6835 }
6836
6837 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6838 if (!cmd)
6839 err = -ENOMEM;
6840 else
6841 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6842 set_secure_conn_complete);
6843
6844 if (err < 0) {
6845 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6846 MGMT_STATUS_FAILED);
6847 if (cmd)
6848 mgmt_pending_free(cmd);
6849 }
6850
6851 failed:
6852 hci_dev_unlock(hdev);
6853 return err;
6854 }
6855
6856 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6857 void *data, u16 len)
6858 {
6859 struct mgmt_mode *cp = data;
6860 bool changed, use_changed;
6861 int err;
6862
6863 bt_dev_dbg(hdev, "sock %p", sk);
6864
6865 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6866 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6867 MGMT_STATUS_INVALID_PARAMS);
6868
6869 hci_dev_lock(hdev);
6870
6871 if (cp->val)
6872 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6873 else
6874 changed = hci_dev_test_and_clear_flag(hdev,
6875 HCI_KEEP_DEBUG_KEYS);
6876
6877 if (cp->val == 0x02)
6878 use_changed = !hci_dev_test_and_set_flag(hdev,
6879 HCI_USE_DEBUG_KEYS);
6880 else
6881 use_changed = hci_dev_test_and_clear_flag(hdev,
6882 HCI_USE_DEBUG_KEYS);
6883
6884 if (hdev_is_powered(hdev) && use_changed &&
6885 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6886 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6887 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6888 sizeof(mode), &mode);
6889 }
6890
6891 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6892 if (err < 0)
6893 goto unlock;
6894
6895 if (changed)
6896 err = new_settings(hdev, sk);
6897
6898 unlock:
6899 hci_dev_unlock(hdev);
6900 return err;
6901 }
6902
6903 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6904 u16 len)
6905 {
6906 struct mgmt_cp_set_privacy *cp = cp_data;
6907 bool changed;
6908 int err;
6909
6910 bt_dev_dbg(hdev, "sock %p", sk);
6911
6912 if (!lmp_le_capable(hdev))
6913 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6914 MGMT_STATUS_NOT_SUPPORTED);
6915
6916 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6917 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6918 MGMT_STATUS_INVALID_PARAMS);
6919
6920 if (hdev_is_powered(hdev))
6921 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6922 MGMT_STATUS_REJECTED);
6923
6924 hci_dev_lock(hdev);
6925
6926 /* If user space supports this command it is also expected to
6927 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6928 */
6929 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6930
6931 if (cp->privacy) {
6932 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6933 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
6934 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6935 hci_adv_instances_set_rpa_expired(hdev, true);
6936 if (cp->privacy == 0x02)
6937 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6938 else
6939 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6940 } else {
6941 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6942 memset(hdev->irk, 0, sizeof(hdev->irk));
6943 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6944 hci_adv_instances_set_rpa_expired(hdev, false);
6945 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6946 }
6947
6948 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
6949 if (err < 0)
6950 goto unlock;
6951
6952 if (changed)
6953 err = new_settings(hdev, sk);
6954
6955 unlock:
6956 hci_dev_unlock(hdev);
6957 return err;
6958 }
6959
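/* An IRK may only be loaded for an identity address, i.e. either a
 * public address or a static random address (two most significant
 * bits set).
 */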
6960 static bool irk_is_valid(struct mgmt_irk_info *irk)
6961 {
6962 switch (irk->addr.type) {
6963 case BDADDR_LE_PUBLIC:
6964 return true;
6965
6966 case BDADDR_LE_RANDOM:
6967 /* Two most significant bits shall be set */
6968 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6969 return false;
6970 return true;
6971 }
6972
6973 return false;
6974 }
6975
6976 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6977 u16 len)
6978 {
6979 struct mgmt_cp_load_irks *cp = cp_data;
6980 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
6981 sizeof(struct mgmt_irk_info));
6982 u16 irk_count, expected_len;
6983 int i, err;
6984
6985 bt_dev_dbg(hdev, "sock %p", sk);
6986
6987 if (!lmp_le_capable(hdev))
6988 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6989 MGMT_STATUS_NOT_SUPPORTED);
6990
6991 irk_count = __le16_to_cpu(cp->irk_count);
6992 if (irk_count > max_irk_count) {
6993 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
6994 irk_count);
6995 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6996 MGMT_STATUS_INVALID_PARAMS);
6997 }
6998
6999 expected_len = struct_size(cp, irks, irk_count);
7000 if (expected_len != len) {
7001 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7002 expected_len, len);
7003 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7004 MGMT_STATUS_INVALID_PARAMS);
7005 }
7006
7007 bt_dev_dbg(hdev, "irk_count %u", irk_count);
7008
7009 for (i = 0; i < irk_count; i++) {
7010 struct mgmt_irk_info *key = &cp->irks[i];
7011
7012 if (!irk_is_valid(key))
7013 return mgmt_cmd_status(sk, hdev->id,
7014 MGMT_OP_LOAD_IRKS,
7015 MGMT_STATUS_INVALID_PARAMS);
7016 }
7017
7018 hci_dev_lock(hdev);
7019
7020 hci_smp_irks_clear(hdev);
7021
7022 for (i = 0; i < irk_count; i++) {
7023 struct mgmt_irk_info *irk = &cp->irks[i];
7024
7025 if (hci_is_blocked_key(hdev,
7026 HCI_BLOCKED_KEY_TYPE_IRK,
7027 irk->val)) {
7028 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7029 &irk->addr.bdaddr);
7030 continue;
7031 }
7032
7033 hci_add_irk(hdev, &irk->addr.bdaddr,
7034 le_addr_type(irk->addr.type), irk->val,
7035 BDADDR_ANY);
7036 }
7037
7038 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7039
7040 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7041
7042 hci_dev_unlock(hdev);
7043
7044 return err;
7045 }
7046
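/* An LTK is only accepted for an identity address and when the
 * initiator flag is a valid boolean (0x00 or 0x01).
 */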
7047 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7048 {
7049 if (key->initiator != 0x00 && key->initiator != 0x01)
7050 return false;
7051
7052 switch (key->addr.type) {
7053 case BDADDR_LE_PUBLIC:
7054 return true;
7055
7056 case BDADDR_LE_RANDOM:
7057 /* Two most significant bits shall be set */
7058 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7059 return false;
7060 return true;
7061 }
7062
7063 return false;
7064 }
7065
7066 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7067 void *cp_data, u16 len)
7068 {
7069 struct mgmt_cp_load_long_term_keys *cp = cp_data;
7070 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7071 sizeof(struct mgmt_ltk_info));
7072 u16 key_count, expected_len;
7073 int i, err;
7074
7075 bt_dev_dbg(hdev, "sock %p", sk);
7076
7077 if (!lmp_le_capable(hdev))
7078 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7079 MGMT_STATUS_NOT_SUPPORTED);
7080
7081 key_count = __le16_to_cpu(cp->key_count);
7082 if (key_count > max_key_count) {
7083 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7084 key_count);
7085 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7086 MGMT_STATUS_INVALID_PARAMS);
7087 }
7088
7089 expected_len = struct_size(cp, keys, key_count);
7090 if (expected_len != len) {
7091 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7092 expected_len, len);
7093 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7094 MGMT_STATUS_INVALID_PARAMS);
7095 }
7096
7097 bt_dev_dbg(hdev, "key_count %u", key_count);
7098
7099 hci_dev_lock(hdev);
7100
7101 hci_smp_ltks_clear(hdev);
7102
7103 for (i = 0; i < key_count; i++) {
7104 struct mgmt_ltk_info *key = &cp->keys[i];
7105 u8 type, authenticated;
7106
7107 if (hci_is_blocked_key(hdev,
7108 HCI_BLOCKED_KEY_TYPE_LTK,
7109 key->val)) {
7110 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7111 &key->addr.bdaddr);
7112 continue;
7113 }
7114
7115 if (!ltk_is_valid(key)) {
7116 bt_dev_warn(hdev, "Invalid LTK for %pMR",
7117 &key->addr.bdaddr);
7118 continue;
7119 }
7120
7121 switch (key->type) {
7122 case MGMT_LTK_UNAUTHENTICATED:
7123 authenticated = 0x00;
7124 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7125 break;
7126 case MGMT_LTK_AUTHENTICATED:
7127 authenticated = 0x01;
7128 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7129 break;
7130 case MGMT_LTK_P256_UNAUTH:
7131 authenticated = 0x00;
7132 type = SMP_LTK_P256;
7133 break;
7134 case MGMT_LTK_P256_AUTH:
7135 authenticated = 0x01;
7136 type = SMP_LTK_P256;
7137 break;
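/* Debug keys are intentionally never stored: fall through
 * to the default case and skip the entry.
 */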
7138 case MGMT_LTK_P256_DEBUG:
7139 authenticated = 0x00;
7140 type = SMP_LTK_P256_DEBUG;
7141 fallthrough;
7142 default:
7143 continue;
7144 }
7145
7146 hci_add_ltk(hdev, &key->addr.bdaddr,
7147 le_addr_type(key->addr.type), type, authenticated,
7148 key->val, key->enc_size, key->ediv, key->rand);
7149 }
7150
7151 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7152 NULL, 0);
7153
7154 hci_dev_unlock(hdev);
7155
7156 return err;
7157 }
7158
7159 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7160 {
7161 struct mgmt_pending_cmd *cmd = data;
7162 struct hci_conn *conn = cmd->user_data;
7163 struct mgmt_cp_get_conn_info *cp = cmd->param;
7164 struct mgmt_rp_get_conn_info rp;
7165 u8 status;
7166
7167 bt_dev_dbg(hdev, "err %d", err);
7168
7169 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7170
7171 status = mgmt_status(err);
7172 if (status == MGMT_STATUS_SUCCESS) {
7173 rp.rssi = conn->rssi;
7174 rp.tx_power = conn->tx_power;
7175 rp.max_tx_power = conn->max_tx_power;
7176 } else {
7177 rp.rssi = HCI_RSSI_INVALID;
7178 rp.tx_power = HCI_TX_POWER_INVALID;
7179 rp.max_tx_power = HCI_TX_POWER_INVALID;
7180 }
7181
7182 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_GET_CONN_INFO, status,
7183 &rp, sizeof(rp));
7184
7185 mgmt_pending_free(cmd);
7186 }
7187
7188 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7189 {
7190 struct mgmt_pending_cmd *cmd = data;
7191 struct mgmt_cp_get_conn_info *cp = cmd->param;
7192 struct hci_conn *conn;
7193 int err;
7194 __le16 handle;
7195
7196 /* Make sure we are still connected */
7197 if (cp->addr.type == BDADDR_BREDR)
7198 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7199 &cp->addr.bdaddr);
7200 else
7201 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7202
7203 if (!conn || conn->state != BT_CONNECTED)
7204 return MGMT_STATUS_NOT_CONNECTED;
7205
7206 cmd->user_data = conn;
7207 handle = cpu_to_le16(conn->handle);
7208
7209 /* Refresh RSSI each time */
7210 err = hci_read_rssi_sync(hdev, handle);
7211
7212 /* For LE links the TX power does not change, thus we don't need
7213 * to query for it once the value is known.
7214 */
7215 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7216 conn->tx_power == HCI_TX_POWER_INVALID))
7217 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7218
7219 /* Max TX power needs to be read only once per connection */
7220 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7221 err = hci_read_tx_power_sync(hdev, handle, 0x01);
7222
7223 return err;
7224 }
7225
7226 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7227 u16 len)
7228 {
7229 struct mgmt_cp_get_conn_info *cp = data;
7230 struct mgmt_rp_get_conn_info rp;
7231 struct hci_conn *conn;
7232 unsigned long conn_info_age;
7233 int err = 0;
7234
7235 bt_dev_dbg(hdev, "sock %p", sk);
7236
7237 memset(&rp, 0, sizeof(rp));
7238 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7239 rp.addr.type = cp->addr.type;
7240
7241 if (!bdaddr_type_is_valid(cp->addr.type))
7242 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7243 MGMT_STATUS_INVALID_PARAMS,
7244 &rp, sizeof(rp));
7245
7246 hci_dev_lock(hdev);
7247
7248 if (!hdev_is_powered(hdev)) {
7249 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7250 MGMT_STATUS_NOT_POWERED, &rp,
7251 sizeof(rp));
7252 goto unlock;
7253 }
7254
7255 if (cp->addr.type == BDADDR_BREDR)
7256 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7257 &cp->addr.bdaddr);
7258 else
7259 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7260
7261 if (!conn || conn->state != BT_CONNECTED) {
7262 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7263 MGMT_STATUS_NOT_CONNECTED, &rp,
7264 sizeof(rp));
7265 goto unlock;
7266 }
7267
7268 /* To avoid the client trying to guess when to poll again, calculate
7269 * the conn info age as a random value between the min/max set in hdev.
7270 */
7271 conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
7272 hdev->conn_info_max_age - 1);
7273
7274 /* Query controller to refresh cached values if they are too old or were
7275 * never read.
7276 */
7277 if (time_after(jiffies, conn->conn_info_timestamp +
7278 msecs_to_jiffies(conn_info_age)) ||
7279 !conn->conn_info_timestamp) {
7280 struct mgmt_pending_cmd *cmd;
7281
7282 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7283 len);
7284 if (!cmd) {
7285 err = -ENOMEM;
7286 } else {
7287 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7288 cmd, get_conn_info_complete);
7289 }
7290
7291 if (err < 0) {
7292 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7293 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7294
7295 if (cmd)
7296 mgmt_pending_free(cmd);
7297
7298 goto unlock;
7299 }
7300
7301 conn->conn_info_timestamp = jiffies;
7302 } else {
7303 /* Cache is valid, just reply with values cached in hci_conn */
7304 rp.rssi = conn->rssi;
7305 rp.tx_power = conn->tx_power;
7306 rp.max_tx_power = conn->max_tx_power;
7307
7308 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7309 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7310 }
7311
7312 unlock:
7313 hci_dev_unlock(hdev);
7314 return err;
7315 }
7316
7317 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7318 {
7319 struct mgmt_pending_cmd *cmd = data;
7320 struct mgmt_cp_get_clock_info *cp = cmd->param;
7321 struct mgmt_rp_get_clock_info rp;
7322 struct hci_conn *conn = cmd->user_data;
7323 u8 status = mgmt_status(err);
7324
7325 bt_dev_dbg(hdev, "err %d", err);
7326
7327 memset(&rp, 0, sizeof(rp));
7328 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7329 rp.addr.type = cp->addr.type;
7330
7331 if (err)
7332 goto complete;
7333
7334 rp.local_clock = cpu_to_le32(hdev->clock);
7335
7336 if (conn) {
7337 rp.piconet_clock = cpu_to_le32(conn->clock);
7338 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7339 }
7340
7341 complete:
7342 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status, &rp,
7343 sizeof(rp));
7344
7345 mgmt_pending_free(cmd);
7346 }
7347
7348 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7349 {
7350 struct mgmt_pending_cmd *cmd = data;
7351 struct mgmt_cp_get_clock_info *cp = cmd->param;
7352 struct hci_cp_read_clock hci_cp;
7353 struct hci_conn *conn;
7354
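/* Read the local clock first: the zeroed request means handle 0x0000
 * and which 0x00 (local clock).
 */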
7355 memset(&hci_cp, 0, sizeof(hci_cp));
7356 hci_read_clock_sync(hdev, &hci_cp);
7357
7358 /* Make sure connection still exists */
7359 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7360 if (!conn || conn->state != BT_CONNECTED)
7361 return MGMT_STATUS_NOT_CONNECTED;
7362
7363 cmd->user_data = conn;
7364 hci_cp.handle = cpu_to_le16(conn->handle);
7365 hci_cp.which = 0x01; /* Piconet clock */
7366
7367 return hci_read_clock_sync(hdev, &hci_cp);
7368 }
7369
7370 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7371 u16 len)
7372 {
7373 struct mgmt_cp_get_clock_info *cp = data;
7374 struct mgmt_rp_get_clock_info rp;
7375 struct mgmt_pending_cmd *cmd;
7376 struct hci_conn *conn;
7377 int err;
7378
7379 bt_dev_dbg(hdev, "sock %p", sk);
7380
7381 memset(&rp, 0, sizeof(rp));
7382 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7383 rp.addr.type = cp->addr.type;
7384
7385 if (cp->addr.type != BDADDR_BREDR)
7386 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7387 MGMT_STATUS_INVALID_PARAMS,
7388 &rp, sizeof(rp));
7389
7390 hci_dev_lock(hdev);
7391
7392 if (!hdev_is_powered(hdev)) {
7393 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7394 MGMT_STATUS_NOT_POWERED, &rp,
7395 sizeof(rp));
7396 goto unlock;
7397 }
7398
7399 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7400 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7401 &cp->addr.bdaddr);
7402 if (!conn || conn->state != BT_CONNECTED) {
7403 err = mgmt_cmd_complete(sk, hdev->id,
7404 MGMT_OP_GET_CLOCK_INFO,
7405 MGMT_STATUS_NOT_CONNECTED,
7406 &rp, sizeof(rp));
7407 goto unlock;
7408 }
7409 } else {
7410 conn = NULL;
7411 }
7412
7413 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7414 if (!cmd)
7415 err = -ENOMEM;
7416 else
7417 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7418 get_clock_info_complete);
7419
7420 if (err < 0) {
7421 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7422 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7423
7424 if (cmd)
7425 mgmt_pending_free(cmd);
7426 }
7427
7429 unlock:
7430 hci_dev_unlock(hdev);
7431 return err;
7432 }
7433
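/* Check whether an LE connection to the given address and address
 * type is fully established.
 */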
7434 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7435 {
7436 struct hci_conn *conn;
7437
7438 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7439 if (!conn)
7440 return false;
7441
7442 if (conn->dst_type != type)
7443 return false;
7444
7445 if (conn->state != BT_CONNECTED)
7446 return false;
7447
7448 return true;
7449 }
7450
7451 /* This function requires the caller holds hdev->lock */
7452 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7453 u8 addr_type, u8 auto_connect)
7454 {
7455 struct hci_conn_params *params;
7456
7457 params = hci_conn_params_add(hdev, addr, addr_type);
7458 if (!params)
7459 return -EIO;
7460
7461 if (params->auto_connect == auto_connect)
7462 return 0;
7463
7464 hci_pend_le_list_del_init(params);
7465
7466 switch (auto_connect) {
7467 case HCI_AUTO_CONN_DISABLED:
7468 case HCI_AUTO_CONN_LINK_LOSS:
7469 /* If auto connect is being disabled when we're trying to
7470 * connect to device, keep connecting.
7471 */
7472 if (params->explicit_connect)
7473 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7474 break;
7475 case HCI_AUTO_CONN_REPORT:
7476 if (params->explicit_connect)
7477 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7478 else
7479 hci_pend_le_list_add(params, &hdev->pend_le_reports);
7480 break;
7481 case HCI_AUTO_CONN_DIRECT:
7482 case HCI_AUTO_CONN_ALWAYS:
7483 if (!is_connected(hdev, addr, addr_type))
7484 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7485 break;
7486 }
7487
7488 params->auto_connect = auto_connect;
7489
7490 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7491 addr, addr_type, auto_connect);
7492
7493 return 0;
7494 }
7495
7496 static void device_added(struct sock *sk, struct hci_dev *hdev,
7497 bdaddr_t *bdaddr, u8 type, u8 action)
7498 {
7499 struct mgmt_ev_device_added ev;
7500
7501 bacpy(&ev.addr.bdaddr, bdaddr);
7502 ev.addr.type = type;
7503 ev.action = action;
7504
7505 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7506 }
7507
7508 static void add_device_complete(struct hci_dev *hdev, void *data, int err)
7509 {
7510 struct mgmt_pending_cmd *cmd = data;
7511 struct mgmt_cp_add_device *cp = cmd->param;
7512
7513 if (!err) {
7514 struct hci_conn_params *params;
7515
7516 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7517 le_addr_type(cp->addr.type));
7518
7519 device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
7520 cp->action);
7521 device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
7522 cp->addr.type, hdev->conn_flags,
7523 params ? params->flags : 0);
7524 }
7525
7526 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
7527 mgmt_status(err), &cp->addr, sizeof(cp->addr));
7528 mgmt_pending_free(cmd);
7529 }
7530
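/* The accept list and connection parameters were already updated
 * under hdev->lock; refreshing the passive scan lets the controller
 * pick up the changes.
 */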
7531 static int add_device_sync(struct hci_dev *hdev, void *data)
7532 {
7533 return hci_update_passive_scan_sync(hdev);
7534 }
7535
7536 static int add_device(struct sock *sk, struct hci_dev *hdev,
7537 void *data, u16 len)
7538 {
7539 struct mgmt_pending_cmd *cmd;
7540 struct mgmt_cp_add_device *cp = data;
7541 u8 auto_conn, addr_type;
7542 struct hci_conn_params *params;
7543 int err;
7544 u32 current_flags = 0;
7545 u32 supported_flags;
7546
7547 bt_dev_dbg(hdev, "sock %p", sk);
7548
7549 if (!bdaddr_type_is_valid(cp->addr.type) ||
7550 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7551 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7552 MGMT_STATUS_INVALID_PARAMS,
7553 &cp->addr, sizeof(cp->addr));
7554
7555 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7556 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7557 MGMT_STATUS_INVALID_PARAMS,
7558 &cp->addr, sizeof(cp->addr));
7559
7560 hci_dev_lock(hdev);
7561
7562 if (cp->addr.type == BDADDR_BREDR) {
7563 /* Only incoming connections action is supported for now */
7564 if (cp->action != 0x01) {
7565 err = mgmt_cmd_complete(sk, hdev->id,
7566 MGMT_OP_ADD_DEVICE,
7567 MGMT_STATUS_INVALID_PARAMS,
7568 &cp->addr, sizeof(cp->addr));
7569 goto unlock;
7570 }
7571
7572 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7573 &cp->addr.bdaddr,
7574 cp->addr.type, 0);
7575 if (err)
7576 goto unlock;
7577
7578 hci_update_scan(hdev);
7579
7580 goto added;
7581 }
7582
7583 addr_type = le_addr_type(cp->addr.type);
7584
7585 if (cp->action == 0x02)
7586 auto_conn = HCI_AUTO_CONN_ALWAYS;
7587 else if (cp->action == 0x01)
7588 auto_conn = HCI_AUTO_CONN_DIRECT;
7589 else
7590 auto_conn = HCI_AUTO_CONN_REPORT;
7591
7592 /* Kernel internally uses conn_params with resolvable private
7593 * address, but Add Device allows only identity addresses.
7594 * Make sure it is enforced before calling
7595 * hci_conn_params_lookup.
7596 */
7597 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7598 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7599 MGMT_STATUS_INVALID_PARAMS,
7600 &cp->addr, sizeof(cp->addr));
7601 goto unlock;
7602 }
7603
7604 /* If the connection parameters don't exist for this device,
7605 * they will be created and configured with defaults.
7606 */
7607 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7608 auto_conn) < 0) {
7609 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7610 MGMT_STATUS_FAILED, &cp->addr,
7611 sizeof(cp->addr));
7612 goto unlock;
7613 } else {
7614 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7615 addr_type);
7616 if (params)
7617 current_flags = params->flags;
7618 }
7619
7620 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
7621 if (!cmd) {
7622 err = -ENOMEM;
7623 goto unlock;
7624 }
7625
7626 err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
7627 add_device_complete);
7628 if (err < 0) {
7629 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7630 MGMT_STATUS_FAILED, &cp->addr,
7631 sizeof(cp->addr));
7632 mgmt_pending_free(cmd);
7633 }
7634
7635 goto unlock;
7636
7637 added:
7638 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7639 supported_flags = hdev->conn_flags;
7640 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7641 supported_flags, current_flags);
7642
7643 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7644 MGMT_STATUS_SUCCESS, &cp->addr,
7645 sizeof(cp->addr));
7646
7647 unlock:
7648 hci_dev_unlock(hdev);
7649 return err;
7650 }
7651
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}

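/* Handler for the Remove Device command. A specific address removes that
 * single entry, while BDADDR_ANY clears the whole accept list and the
 * loaded LE connection parameters (entries pending an explicit connect
 * are downgraded rather than freed).
 */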
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_free(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			hci_conn_params_free(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}

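/* Runs on the cmd_sync queue to push updated connection parameters to an
 * existing LE connection; returns -ECANCELED if the connection is gone.
 */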
static int conn_update_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn_params *params = data;
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, &params->addr, params->addr_type);
	if (!conn)
		return -ECANCELED;

	return hci_le_conn_update_sync(hdev, conn, params);
}

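/* Handler for the Load Connection Parameters command. Invalid entries are
 * skipped rather than failing the whole load, and reloading a single
 * existing entry triggers a connection update if an active connection as
 * central uses different parameters.
 */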
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	if (param_count > 1)
		hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		bool update = false;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		/* Detect when the load is for an existing parameter, then
		 * attempt to trigger the connection update procedure.
		 */
		if (!i && param_count == 1) {
			hci_param = hci_conn_params_lookup(hdev,
							   &param->addr.bdaddr,
							   addr_type);
			if (hci_param)
				update = true;
			else
				hci_conn_params_clear_disabled(hdev);
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;

		/* Check if we need to trigger a connection update */
		if (update) {
			struct hci_conn *conn;

			/* Look up an existing connection as central and
			 * check if the parameters match; if they don't,
			 * trigger a connection update.
			 */
			conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
						       addr_type);
			if (conn && conn->role == HCI_ROLE_MASTER &&
			    (conn->le_conn_min_interval != min ||
			     conn->le_conn_max_interval != max ||
			     conn->le_conn_latency != latency ||
			     conn->le_supv_timeout != timeout))
				hci_cmd_sync_queue(hdev, conn_update_sync,
						   hci_param, NULL);
		}
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}

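/* Handler for the Set External Configuration command. Toggling the option
 * may move the controller between the configured and unconfigured index
 * lists, which is signalled by removing and re-adding the index.
 */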
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

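/* Handler for the Set Public Address command. Only valid while powered off
 * and when the driver provides a set_bdaddr hook; a changed address can
 * complete the controller configuration and trigger power on.
 */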
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

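/* Completion callback for Read Local OOB Extended Data: translate the HCI
 * response into EIR-encoded hash/randomizer values, reply to the command
 * and notify other sockets that registered for OOB data updates.
 */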
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}

static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
				  struct mgmt_cp_read_local_oob_ext_data *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
			       cp, sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
				 read_local_oob_ext_data_complete);

	if (err < 0) {
		mgmt_pending_remove(cmd);
		return err;
	}

	return 0;
}

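/* Handler for the Read Local OOB Extended Data command. BR/EDR data is
 * read from the controller asynchronously, while the LE response is
 * assembled here from the local address, role, SC confirm/random values
 * and advertising flags.
 */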
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}

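/* Build the mask of advertising flags that this controller can satisfy,
 * taking extended advertising and PHY support into account.
 */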
static u32 get_supported_adv_flags(struct hci_dev *hdev)
{
	u32 flags = 0;

	flags |= MGMT_ADV_FLAG_CONNECTABLE;
	flags |= MGMT_ADV_FLAG_DISCOV;
	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
	flags |= MGMT_ADV_FLAG_APPEARANCE;
	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
	flags |= MGMT_ADV_PARAM_DURATION;
	flags |= MGMT_ADV_PARAM_TIMEOUT;
	flags |= MGMT_ADV_PARAM_INTERVALS;
	flags |= MGMT_ADV_PARAM_TX_POWER;
	flags |= MGMT_ADV_PARAM_SCAN_RSP;

	/* In extended adv the TX_POWER returned from Set Adv Param
	 * will always be valid.
	 */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
		flags |= MGMT_ADV_FLAG_TX_POWER;

	if (ext_adv_capable(hdev)) {
		flags |= MGMT_ADV_FLAG_SEC_1M;
		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;

		if (le_2m_capable(hdev))
			flags |= MGMT_ADV_FLAG_SEC_2M;

		if (le_coded_capable(hdev))
			flags |= MGMT_ADV_FLAG_SEC_CODED;
	}

	return flags;
}

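/* Handler for the Read Advertising Features command, returning the
 * supported flags, data size limits and currently registered instances.
 */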
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = max_adv_len(hdev);
	rp->max_scan_rsp_len = max_adv_len(hdev);
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}

static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */

	return eir_append_local_name(hdev, buf, 0);
}

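/* Calculate how many bytes of advertising or scan response data remain
 * for the caller once the fields managed by the kernel (flags, TX power,
 * local name, appearance) are accounted for.
 */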
static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
			   bool is_adv_data)
{
	u8 max_len = max_adv_len(hdev);

	if (is_adv_data) {
		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
				 MGMT_ADV_FLAG_LIMITED_DISCOV |
				 MGMT_ADV_FLAG_MANAGED_FLAGS))
			max_len -= 3;

		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
			max_len -= 3;
	} else {
		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
			max_len -= calculate_name_len(hdev);

		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
			max_len -= 4;
	}

	return max_len;
}

static bool flags_managed(u32 adv_flags)
{
	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
			    MGMT_ADV_FLAG_LIMITED_DISCOV |
			    MGMT_ADV_FLAG_MANAGED_FLAGS);
}

static bool tx_power_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
}

static bool name_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
}

static bool appearance_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
}

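/* Validate user-provided advertising/scan response TLV data: it must fit
 * within the available space, be well formed, and must not contain fields
 * that the kernel manages on behalf of the caller.
 */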
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		if (!cur_len)
			continue;

		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;

		/* If the current field length would exceed the total data
		 * length, then it's invalid.
		 */
		if (i + cur_len >= len)
			return false;
	}

	return true;
}

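/* Check that only supported flags are requested and that at most one
 * secondary PHY preference is set: phy_flags & -phy_flags isolates the
 * lowest set bit, so the XOR below is non-zero whenever more than one of
 * the MGMT_ADV_FLAG_SEC_* bits is set.
 */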
static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
{
	u32 supported_flags, phy_flags;

	/* The current implementation only supports a subset of the specified
	 * flags. Also need to check mutual exclusiveness of sec flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
	if (adv_flags & ~supported_flags ||
	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
		return false;

	return true;
}

static bool adv_busy(struct hci_dev *hdev)
{
	return pending_find(MGMT_OP_SET_LE, hdev);
}

static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
			     int err)
{
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance;

		if (!adv->pending)
			continue;

		if (!err) {
			adv->pending = false;
			continue;
		}

		instance = adv->instance;

		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(sk, hdev, instance);
	}

	hci_dev_unlock(hdev);
}

static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_advertising *cp = cmd->param;
	struct mgmt_rp_add_advertising rp;

	memset(&rp, 0, sizeof(rp));

	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));

	add_adv_complete(hdev, cmd->sk, cp->instance, err);

	mgmt_pending_free(cmd);
}

static int add_advertising_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_advertising *cp = cmd->param;

	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}

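/* Handler for the Add Advertising command: validate the requested flags
 * and TLV data, register the instance and, when possible, schedule it for
 * advertising right away.
 */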
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}

static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;

	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
}

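/* Handler for the Add Extended Advertising Parameters command. This is
 * the first half of the two-call extended interface: it registers an
 * instance with parameters only, with the data supplied later through
 * Add Extended Advertising Data.
 */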
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
	struct mgmt_rp_add_advertising rp;

	add_adv_complete(hdev, cmd->sk, cp->instance, err);

	memset(&rp, 0, sizeof(rp));

	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}

static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
	int err;

	if (ext_adv_capable(hdev)) {
		err = hci_update_adv_data_sync(hdev, cp->instance);
		if (err)
			return err;

		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
		if (err)
			return err;

		return hci_enable_ext_advertising_sync(hdev, cp->instance);
	}

	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}

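/* Handler for the Add Extended Advertising Data command, the second half
 * of the extended interface: attach advertising and scan response data to
 * a previously registered instance and schedule it.
 */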
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete.
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void remove_advertising_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_advertising *cp = cmd->param;
	struct mgmt_rp_remove_advertising rp;

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}

static int remove_advertising_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_advertising *cp = cmd->param;
	int err;

	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
	if (err)
		return err;

	if (list_empty(&hdev->adv_instances))
		err = hci_disable_advertising_sync(hdev);

	return err;
}

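/* Handler for the Remove Advertising command. Instance 0 removes all
 * registered instances; advertising is disabled once no instance remains.
 */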
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 flags, supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	/* The current implementation only supports a subset of the specified
	 * flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	if (flags & ~supported_flags)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}

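/* Dispatch table for the mgmt commands, indexed by opcode. Each entry
 * carries the expected parameter size plus flags describing whether the
 * command is variable length, needs no controller index, may be used by
 * untrusted sockets or applies to unconfigured controllers.
 */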
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,         MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,      MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info, MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,          MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,        MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,     MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor, MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
	{ mgmt_hci_cmd_sync,       MGMT_HCI_CMD_SYNC_SIZE, HCI_MGMT_VAR_LEN },
};

void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
		return;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}

void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };

	if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
		return;

	mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
}

void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}

void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
			     &match);

	/* If the power off is because of hdev unregistration let's use
	 * the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
	else
		match.mgmt_status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct mgmt_pending_cmd *cmd;
	u8 status;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

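/* Map the SMP key type, together with its authentication state, onto the
 * long term key type value exposed to userspace.
 */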
static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_RESPONDER:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}

	return MGMT_LTK_UNAUTHENTICATED;
}

void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * long term keys to be stored. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * should the long term key be stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
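	/* b[5] holds the most significant address byte: static random
	 * addresses have the two top bits set (0b11), while resolvable
	 * (0b01) and non-resolvable (0b00) private addresses do not.
	 */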
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * signature resolving keys to be stored. Their addresses will
	 * change the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * should the signature resolving key be stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

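	/* Only identity addresses (public or static random) are stable
	 * enough for the stored connection parameters to be useful.
	 */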
	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* Allocate a buffer sized for either the LE advertising data or
	 * the BR/EDR device name and class of device.
	 */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) +
				     (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE,
						    name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class,
						    sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}

static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
}

bool mgmt_powering_down(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
		return true;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

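/* Only ACL, LE and BIS links are translated into Device Disconnected
 * events; disconnections on any other link type are not reported to
 * userspace here.
 */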
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK &&
	    link_type != LE_LINK &&
	    link_type != BIS_LINK)
		return;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, true,
			     unpair_device_rsp, hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
					 conn->dst_type, status, true);
		return;
	}

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, false, sk_lookup,
			     &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, false, sk_lookup,
			     &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, false, sk_lookup,
			     &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS,
				   NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is an HCI command related to powering the
		 * HCI dev on or off, don't send any mgmt signals.
		 */
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
			return;

		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}

static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}

static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

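		/* 16- and 32-bit UUIDs are matched after being expanded
		 * into the 128-bit Bluetooth Base UUID
		 * (0000xxxx-0000-1000-8000-00805f9b34fb); e.g. the 16-bit
		 * UUID 0x180d compares equal to
		 * 0000180d-0000-1000-8000-00805f9b34fb.
		 */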
		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}

static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If an RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * an RSSI smaller than the RSSI threshold will be dropped. If the
	 * quirk is set, let it through for further processing, as we might
	 * need to restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in the filter, results with
		 * no matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure results are reported with updated RSSI values.
	 */
	if (hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER)) {
		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}

void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.monitor_handle = cpu_to_le16(handle);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}

static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}

static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}

static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
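	/* Advertising payloads are length-prefixed elements: eir[i] is
	 * the element length and eir[i + 1] the AD type, hence the
	 * "eir[i] + 1" stride below.
	 */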
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}

void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);

			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
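	/* (5 = 1 length byte + 1 AD type byte + 3 Class of Device bytes,
	 * appended below when dev_class is given and the EIR data does
	 * not already carry a CoD field)
	 */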
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}

void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) +
			     (name ? eir_precalc_len(name_len) : 0));
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name,
					    name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}

void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	} else {
		memset(&ev.addr, 0, sizeof(ev.addr));
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}

static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};

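/* Register the Management control channel with the HCI socket layer;
 * once registered, incoming commands are dispatched through the
 * mgmt_handlers table above.
 */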
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}

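/* Called when a Management socket closes: abort any mesh transmissions
 * this socket still has queued on any controller.
 */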
void mgmt_cleanup(struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, true);
		} while (mesh_tx);
	}

	read_unlock(&hci_dev_list_lock);
}
