/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010 Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <linux/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
#include "aosp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	23

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
	MGMT_OP_HCI_CMD_SYNC,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};

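/* Subset of commands and events available to untrusted sockets, i.e.
 * sockets without HCI_SOCK_TRUSTED set. Only read-only operations are
 * exposed here; anything that mutates controller state requires a
 * trusted socket.
 */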
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};

#define CACHE_TIMEOUT	secs_to_jiffies(2)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

static u8 mgmt_errno_status(int err)
{
	switch (err) {
	case 0:
		return MGMT_STATUS_SUCCESS;
	case -EPERM:
		return MGMT_STATUS_REJECTED;
	case -EINVAL:
		return MGMT_STATUS_INVALID_PARAMS;
	case -EOPNOTSUPP:
		return MGMT_STATUS_NOT_SUPPORTED;
	case -EBUSY:
		return MGMT_STATUS_BUSY;
	case -ETIMEDOUT:
		return MGMT_STATUS_AUTH_FAILED;
	case -ENOMEM:
		return MGMT_STATUS_NO_RESOURCES;
	case -EISCONN:
		return MGMT_STATUS_ALREADY_CONNECTED;
	case -ENOTCONN:
		return MGMT_STATUS_DISCONNECTED;
	}

	return MGMT_STATUS_FAILED;
}

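/* Convert either an HCI status code (err >= 0, looked up in
 * mgmt_status_table) or a negative errno into an MGMT status code.
 * For example, both mgmt_status(-EBUSY) and mgmt_status(0x0c)
 * ("Command Disallowed") resolve to MGMT_STATUS_BUSY.
 */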
static u8 mgmt_status(int err)
{
	if (err < 0)
		return mgmt_errno_status(err);

	if (err < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[err];

	return MGMT_STATUS_FAILED;
}

static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}

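/* Map an MGMT address type to the HCI LE address type. Anything other
 * than BDADDR_LE_PUBLIC (including invalid values) is treated as a
 * random address.
 */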
static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;
	else
		return ADDR_LE_DEV_RANDOM;
}

void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}

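/* The Read Commands reply packs the supported command opcodes followed
 * by the supported event opcodes into a single little-endian u16 array.
 * Untrusted sockets are given the reduced read-only lists instead.
 */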
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}

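/* Only fully configured controllers are listed here. Controllers that
 * are still in setup or config, bound to a user channel, raw-only or
 * unconfigured are skipped; unconfigured ones are reported through the
 * unconfigured and extended index lists instead.
 */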
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

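/* The extended index list reports both configured (type 0x00) and
 * unconfigured (type 0x01) controllers together with their bus type,
 * so callers no longer need the two separate legacy lists.
 */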
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}

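/* A controller counts as configured once any required external
 * configuration has completed and, where the default address is
 * invalid or a bdaddr property must be used, a public address has
 * been set.
 */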
static bool is_configured(struct hci_dev *hdev)
{
	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if ((hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) ||
	     hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) ||
	     hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}

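/* Derive the bitmask of PHYs the controller can support from its LMP
 * and LE feature bits: BR 1M is implied by BR/EDR support, the EDR
 * 2M/3M and multi-slot variants depend on the corresponding LMP
 * features, and LE 2M/Coded depend on the LE feature page.
 */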
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}

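/* Compute the currently selected PHYs. Note that the EDR bits in
 * hdev->pkt_type are "shall not be used" flags, so a cleared bit means
 * the packet type (and thus the PHY) is selected.
 */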
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}

static u32 get_configurable_phys(struct hci_dev *hdev)
{
	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}

static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev))
			settings |= MGMT_SETTING_SSP;

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (hci_test_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) || hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (ll_privacy_capable(hdev))
		settings |= MGMT_SETTING_LL_PRIVACY;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then whether
	 * the address is actually used decides if the flag is set.
	 *
	 * For LE-only single-mode controllers and for dual-mode
	 * controllers with BR/EDR disabled, the existence of the static
	 * address will be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	if (ll_privacy_capable(hdev))
		settings |= MGMT_SETTING_LL_PRIVACY;

	return settings;
}

static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}

bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}

static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}

static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}

static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the advertising enable path, i.e. in
	 * hci_start_ext_adv_sync() or hci_enable_advertising_sync().
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}

static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data);

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, just make sure the
	 * limited discoverable flag is cleared. Even in the case of a
	 * timeout triggered from general discoverable, it is safe to
	 * unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);

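/* Finish a mesh transmit: emit MGMT_EV_MESH_PACKET_CMPLT for the
 * handle unless the completion is silent, then release the TX entry.
 */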
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}

static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	if (list_empty(&hdev->adv_instances))
		hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}

static int mesh_send_sync(struct hci_dev *hdev, void *data);
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);

static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}

static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}

static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set implicitly
	 * so that pairing works for them. For mgmt, however, we
	 * require user-space to explicitly enable it.
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}

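/* Build the EIR blob used by Read Extended Controller Information and
 * the Extended Controller Information Changed event: class of device
 * (if BR/EDR is enabled), appearance (if LE is enabled) and the
 * complete and short local names.
 */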
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}

static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}

static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}

void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for the AUTO_OFF case where the controller
		 * might not "really" have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}

static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_settings() for power on, as power off
		 * is deferred to hdev->power_off work which does call
		 * hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}

static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return -ECANCELED;

	cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}

static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!cp->val) {
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
					      MGMT_STATUS_BUSY);
			goto failed;
		}
	}

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

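/* Helper context for mgmt_pending_foreach() callbacks: responds to each
 * pending command and keeps a single held socket reference in sk so the
 * caller can send one final new_settings event and then drop it.
 */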
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, *status);
}

static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	/* dequeue cmd_sync entries using cmd as data as that is about to be
	 * removed/freed.
	 */
	hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);

	if (cmd->cmd_complete) {
		cmd->cmd_complete(cmd, match->mgmt_status);
		return;
	}

	cmd_status_rsp(cmd, data);
}

static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}

static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = secs_to_jiffies(hdev->discov_timeout);

		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}

static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. If only the timeout gets updated,
	 * no HCI transactions are needed.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = secs_to_jiffies(hdev->discov_timeout);

			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}

static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}

static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}

static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

1849 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1850 u16 len)
1851 {
1852 struct mgmt_mode *cp = data;
1853 struct mgmt_pending_cmd *cmd;
1854 u8 val, status;
1855 int err;
1856
1857 bt_dev_dbg(hdev, "sock %p", sk);
1858
1859 status = mgmt_bredr_support(hdev);
1860 if (status)
1861 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1862 status);
1863
1864 if (cp->val != 0x00 && cp->val != 0x01)
1865 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1866 MGMT_STATUS_INVALID_PARAMS);
1867
1868 hci_dev_lock(hdev);
1869
1870 if (!hdev_is_powered(hdev)) {
1871 bool changed = false;
1872
1873 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1874 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1875 changed = true;
1876 }
1877
1878 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1879 if (err < 0)
1880 goto failed;
1881
1882 if (changed)
1883 err = new_settings(hdev, sk);
1884
1885 goto failed;
1886 }
1887
1888 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1889 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1890 MGMT_STATUS_BUSY);
1891 goto failed;
1892 }
1893
1894 val = !!cp->val;
1895
1896 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1897 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1898 goto failed;
1899 }
1900
1901 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1902 if (!cmd) {
1903 err = -ENOMEM;
1904 goto failed;
1905 }
1906
1907 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1908 if (err < 0) {
1909 mgmt_pending_remove(cmd);
1910 goto failed;
1911 }
1912
1913 failed:
1914 hci_dev_unlock(hdev);
1915 return err;
1916 }
1917
1918 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1919 {
1920 struct cmd_lookup match = { NULL, hdev };
1921 struct mgmt_pending_cmd *cmd = data;
1922 struct mgmt_mode *cp = cmd->param;
1923 u8 enable = cp->val;
1924 bool changed;
1925
1926 /* Make sure cmd still outstanding. */
1927 if (err == -ECANCELED || cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1928 return;
1929
1930 if (err) {
1931 u8 mgmt_err = mgmt_status(err);
1932
1933 if (enable && hci_dev_test_and_clear_flag(hdev,
1934 HCI_SSP_ENABLED)) {
1935 new_settings(hdev, NULL);
1936 }
1937
1938 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, true,
1939 cmd_status_rsp, &mgmt_err);
1940 return;
1941 }
1942
1943 if (enable) {
1944 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1945 } else {
1946 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1947 }
1948
1949 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, true, settings_rsp, &match);
1950
1951 if (changed)
1952 new_settings(hdev, match.sk);
1953
1954 if (match.sk)
1955 sock_put(match.sk);
1956
1957 hci_update_eir_sync(hdev);
1958 }
1959
1960 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1961 {
1962 struct mgmt_pending_cmd *cmd = data;
1963 struct mgmt_mode *cp = cmd->param;
1964 bool changed = false;
1965 int err;
1966
1967 if (cp->val)
1968 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1969
1970 err = hci_write_ssp_mode_sync(hdev, cp->val);
1971
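/* Note: on success the optimistic HCI_SSP_ENABLED update above is
 * reverted below, seemingly so that set_ssp_complete() can observe the
 * flag transition itself via test-and-set and broadcast new_settings
 * exactly once (an interpretation; the intent is not documented here).
 */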
1972 if (!err && changed)
1973 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1974
1975 return err;
1976 }
1977
1978 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1979 {
1980 struct mgmt_mode *cp = data;
1981 struct mgmt_pending_cmd *cmd;
1982 u8 status;
1983 int err;
1984
1985 bt_dev_dbg(hdev, "sock %p", sk);
1986
1987 status = mgmt_bredr_support(hdev);
1988 if (status)
1989 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1990
1991 if (!lmp_ssp_capable(hdev))
1992 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1993 MGMT_STATUS_NOT_SUPPORTED);
1994
1995 if (cp->val != 0x00 && cp->val != 0x01)
1996 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1997 MGMT_STATUS_INVALID_PARAMS);
1998
1999 hci_dev_lock(hdev);
2000
2001 if (!hdev_is_powered(hdev)) {
2002 bool changed;
2003
2004 if (cp->val) {
2005 changed = !hci_dev_test_and_set_flag(hdev,
2006 HCI_SSP_ENABLED);
2007 } else {
2008 changed = hci_dev_test_and_clear_flag(hdev,
2009 HCI_SSP_ENABLED);
2010 }
2011
2012 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2013 if (err < 0)
2014 goto failed;
2015
2016 if (changed)
2017 err = new_settings(hdev, sk);
2018
2019 goto failed;
2020 }
2021
2022 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2023 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2024 MGMT_STATUS_BUSY);
2025 goto failed;
2026 }
2027
2028 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2029 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2030 goto failed;
2031 }
2032
2033 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2034 if (!cmd)
2035 err = -ENOMEM;
2036 else
2037 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2038 set_ssp_complete);
2039
2040 if (err < 0) {
2041 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2042 MGMT_STATUS_FAILED);
2043
2044 if (cmd)
2045 mgmt_pending_remove(cmd);
2046 }
2047
2048 failed:
2049 hci_dev_unlock(hdev);
2050 return err;
2051 }
2052
2053 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2054 {
2055 bt_dev_dbg(hdev, "sock %p", sk);
2056
2057 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2058 MGMT_STATUS_NOT_SUPPORTED);
2059 }
2060
2061 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2062 {
2063 struct cmd_lookup match = { NULL, hdev };
2064 u8 status = mgmt_status(err);
2065
2066 bt_dev_dbg(hdev, "err %d", err);
2067
2068 if (status) {
2069 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, true, cmd_status_rsp,
2070 &status);
2071 return;
2072 }
2073
2074 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, true, settings_rsp, &match);
2075
2076 new_settings(hdev, match.sk);
2077
2078 if (match.sk)
2079 sock_put(match.sk);
2080 }
2081
2082 static int set_le_sync(struct hci_dev *hdev, void *data)
2083 {
2084 struct mgmt_pending_cmd *cmd = data;
2085 struct mgmt_mode *cp = cmd->param;
2086 u8 val = !!cp->val;
2087 int err;
2088
2089 if (!val) {
2090 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2091
2092 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2093 hci_disable_advertising_sync(hdev);
2094
2095 if (ext_adv_capable(hdev))
2096 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2097 } else {
2098 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2099 }
2100
2101 err = hci_write_le_host_supported_sync(hdev, val, 0);
2102
2103 /* Make sure the controller has a good default for
2104 * advertising data. Restrict the update to when LE
2105 * has actually been enabled. During power on, the
2106 * update in powered_update_hci will take care of it.
2107 */
2108 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2109 if (ext_adv_capable(hdev)) {
2110 int status;
2111
2112 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2113 if (!status)
2114 hci_update_scan_rsp_data_sync(hdev, 0x00);
2115 } else {
2116 hci_update_adv_data_sync(hdev, 0x00);
2117 hci_update_scan_rsp_data_sync(hdev, 0x00);
2118 }
2119
2120 hci_update_passive_scan(hdev);
2121 }
2122
2123 return err;
2124 }
2125
2126 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2127 {
2128 struct mgmt_pending_cmd *cmd = data;
2129 u8 status = mgmt_status(err);
2130 struct sock *sk = cmd->sk;
2131
2132 if (status) {
2133 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, true,
2134 cmd_status_rsp, &status);
2135 return;
2136 }
2137
2138 mgmt_pending_remove(cmd);
2139 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2140 }
2141
2142 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2143 {
2144 struct mgmt_pending_cmd *cmd = data;
2145 struct mgmt_cp_set_mesh *cp = cmd->param;
2146 size_t len = cmd->param_len;
2147
2148 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2149
2150 if (cp->enable)
2151 hci_dev_set_flag(hdev, HCI_MESH);
2152 else
2153 hci_dev_clear_flag(hdev, HCI_MESH);
2154
2155 hdev->le_scan_interval = __le16_to_cpu(cp->period);
2156 hdev->le_scan_window = __le16_to_cpu(cp->window);
2157
2158 len -= sizeof(*cp);
2159
2160 /* If filters don't fit, forward all adv pkts */
2161 if (len <= sizeof(hdev->mesh_ad_types))
2162 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2163
2164 hci_update_passive_scan_sync(hdev);
2165 return 0;
2166 }
2167
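/* Set Mesh Receiver handler. The period and window below are LE scan
 * timing values; assuming the usual 0.625 ms units used for LE scan
 * parameters, the accepted range 0x0004-0x4000 corresponds to roughly
 * 2.5 ms - 10.24 s, with the window never allowed to exceed the period.
 */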
2168 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2169 {
2170 struct mgmt_cp_set_mesh *cp = data;
2171 struct mgmt_pending_cmd *cmd;
2172 __u16 period, window;
2173 int err = 0;
2174
2175 bt_dev_dbg(hdev, "sock %p", sk);
2176
2177 if (!lmp_le_capable(hdev) ||
2178 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2179 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2180 MGMT_STATUS_NOT_SUPPORTED);
2181
2182 if (cp->enable != 0x00 && cp->enable != 0x01)
2183 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2184 MGMT_STATUS_INVALID_PARAMS);
2185
2186 /* Keep allowed ranges in sync with set_scan_params() */
2187 period = __le16_to_cpu(cp->period);
2188
2189 if (period < 0x0004 || period > 0x4000)
2190 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2191 MGMT_STATUS_INVALID_PARAMS);
2192
2193 window = __le16_to_cpu(cp->window);
2194
2195 if (window < 0x0004 || window > 0x4000)
2196 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2197 MGMT_STATUS_INVALID_PARAMS);
2198
2199 if (window > period)
2200 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2201 MGMT_STATUS_INVALID_PARAMS);
2202
2203 hci_dev_lock(hdev);
2204
2205 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2206 if (!cmd)
2207 err = -ENOMEM;
2208 else
2209 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2210 set_mesh_complete);
2211
2212 if (err < 0) {
2213 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2214 MGMT_STATUS_FAILED);
2215
2216 if (cmd)
2217 mgmt_pending_remove(cmd);
2218 }
2219
2220 hci_dev_unlock(hdev);
2221 return err;
2222 }
2223
2224 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2225 {
2226 struct mgmt_mesh_tx *mesh_tx = data;
2227 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2228 unsigned long mesh_send_interval;
2229 u8 mgmt_err = mgmt_status(err);
2230
2231 /* Report any errors here, but don't report completion */
2232
2233 if (mgmt_err) {
2234 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2235 /* Send Complete Error Code for handle */
2236 mesh_send_complete(hdev, mesh_tx, false);
2237 return;
2238 }
2239
2240 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2241 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2242 mesh_send_interval);
2243 }
2244
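/* Queued from mesh_send(): transmits one mesh packet by installing a
 * temporary advertising instance one past le_num_of_adv_sets. If every
 * controller advertising set is already occupied, the transmission is
 * reported as busy instead.
 */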
2245 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2246 {
2247 struct mgmt_mesh_tx *mesh_tx = data;
2248 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2249 struct adv_info *adv, *next_instance;
2250 u8 instance = hdev->le_num_of_adv_sets + 1;
2251 u16 timeout, duration;
2252 int err = 0;
2253
2254 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2255 return MGMT_STATUS_BUSY;
2256
2257 timeout = 1000;
2258 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2259 adv = hci_add_adv_instance(hdev, instance, 0,
2260 send->adv_data_len, send->adv_data,
2261 0, NULL,
2262 timeout, duration,
2263 HCI_ADV_TX_POWER_NO_PREFERENCE,
2264 hdev->le_adv_min_interval,
2265 hdev->le_adv_max_interval,
2266 mesh_tx->handle);
2267
2268 if (!IS_ERR(adv))
2269 mesh_tx->instance = instance;
2270 else
2271 err = PTR_ERR(adv);
2272
2273 if (hdev->cur_adv_instance == instance) {
2274 /* If the currently advertised instance is being changed then
2275 * cancel the current advertising and schedule the next
2276 * instance. If there is only one instance then the overridden
2277 * advertising data will be visible right away.
2278 */
2279 cancel_adv_timeout(hdev);
2280
2281 next_instance = hci_get_next_instance(hdev, instance);
2282 if (next_instance)
2283 instance = next_instance->instance;
2284 else
2285 instance = 0;
2286 } else if (hdev->adv_instance_timeout) {
2287 /* Immediately advertise the new instance if no other is active,
2288 * or let it be scheduled naturally if ADV is already in progress.
2289 */
2290 instance = 0;
2291 }
2292
2293 if (instance)
2294 return hci_schedule_adv_instance_sync(hdev, instance, true);
2295
2296 return err;
2297 }
2298
2299 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2300 {
2301 struct mgmt_rp_mesh_read_features *rp = data;
2302
2303 if (rp->used_handles >= rp->max_handles)
2304 return;
2305
2306 rp->handles[rp->used_handles++] = mesh_tx->handle;
2307 }
2308
2309 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2310 void *data, u16 len)
2311 {
2312 struct mgmt_rp_mesh_read_features rp;
2313
2314 if (!lmp_le_capable(hdev) ||
2315 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2316 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2317 MGMT_STATUS_NOT_SUPPORTED);
2318
2319 memset(&rp, 0, sizeof(rp));
2320 rp.index = cpu_to_le16(hdev->id);
2321 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2322 rp.max_handles = MESH_HANDLES_MAX;
2323
2324 hci_dev_lock(hdev);
2325
2326 if (rp.max_handles)
2327 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2328
2329 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2330 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2331
2332 hci_dev_unlock(hdev);
2333 return 0;
2334 }
2335
2336 static int send_cancel(struct hci_dev *hdev, void *data)
2337 {
2338 struct mgmt_pending_cmd *cmd = data;
2339 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2340 struct mgmt_mesh_tx *mesh_tx;
2341
2342 if (!cancel->handle) {
2343 do {
2344 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2345
2346 if (mesh_tx)
2347 mesh_send_complete(hdev, mesh_tx, false);
2348 } while (mesh_tx);
2349 } else {
2350 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2351
2352 if (mesh_tx && mesh_tx->sk == cmd->sk)
2353 mesh_send_complete(hdev, mesh_tx, false);
2354 }
2355
2356 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2357 0, NULL, 0);
2358 mgmt_pending_free(cmd);
2359
2360 return 0;
2361 }
2362
2363 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2364 void *data, u16 len)
2365 {
2366 struct mgmt_pending_cmd *cmd;
2367 int err;
2368
2369 if (!lmp_le_capable(hdev) ||
2370 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2371 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2372 MGMT_STATUS_NOT_SUPPORTED);
2373
2374 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2375 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2376 MGMT_STATUS_REJECTED);
2377
2378 hci_dev_lock(hdev);
2379 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2380 if (!cmd)
2381 err = -ENOMEM;
2382 else
2383 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2384
2385 if (err < 0) {
2386 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2387 MGMT_STATUS_FAILED);
2388
2389 if (cmd)
2390 mgmt_pending_free(cmd);
2391 }
2392
2393 hci_dev_unlock(hdev);
2394 return err;
2395 }
2396
2397 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2398 {
2399 struct mgmt_mesh_tx *mesh_tx;
2400 struct mgmt_cp_mesh_send *send = data;
2401 struct mgmt_rp_mesh_read_features rp;
2402 bool sending;
2403 int err = 0;
2404
2405 if (!lmp_le_capable(hdev) ||
2406 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2407 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2408 MGMT_STATUS_NOT_SUPPORTED);
2409 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2410 len <= MGMT_MESH_SEND_SIZE ||
2411 len > (MGMT_MESH_SEND_SIZE + 31))
2412 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2413 MGMT_STATUS_REJECTED);
2414
2415 hci_dev_lock(hdev);
2416
2417 memset(&rp, 0, sizeof(rp));
2418 rp.max_handles = MESH_HANDLES_MAX;
2419
2420 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2421
2422 if (rp.max_handles <= rp.used_handles) {
2423 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2424 MGMT_STATUS_BUSY);
2425 goto done;
2426 }
2427
2428 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2429 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2430
2431 if (!mesh_tx)
2432 err = -ENOMEM;
2433 else if (!sending)
2434 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2435 mesh_send_start_complete);
2436
2437 if (err < 0) {
2438 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2439 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2440 MGMT_STATUS_FAILED);
2441
2442 if (mesh_tx) {
2443 if (sending)
2444 mgmt_mesh_remove(mesh_tx);
2445 }
2446 } else {
2447 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2448
2449 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2450 &mesh_tx->handle, 1);
2451 }
2452
2453 done:
2454 hci_dev_unlock(hdev);
2455 return err;
2456 }
2457
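/* Set Low Energy handler: rejects disabling LE on LE-only controllers,
 * answers directly when powered off or when the host LE state already
 * matches, and otherwise queues set_le_sync() to update the controller.
 */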
2458 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2459 {
2460 struct mgmt_mode *cp = data;
2461 struct mgmt_pending_cmd *cmd;
2462 int err;
2463 u8 val, enabled;
2464
2465 bt_dev_dbg(hdev, "sock %p", sk);
2466
2467 if (!lmp_le_capable(hdev))
2468 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2469 MGMT_STATUS_NOT_SUPPORTED);
2470
2471 if (cp->val != 0x00 && cp->val != 0x01)
2472 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2473 MGMT_STATUS_INVALID_PARAMS);
2474
2475 /* Bluetooth single-mode LE-only controllers, and dual-mode
2476 * controllers configured as LE-only devices, do not allow
2477 * switching LE off: they either have LE enabled explicitly or
2478 * have had BR/EDR switched off previously.
2479 *
2480 * When trying to enable LE that is already enabled, gracefully
2481 * send a positive response. Trying to disable it, however,
2482 * results in rejection.
2483 */
2484 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2485 if (cp->val == 0x01)
2486 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2487
2488 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2489 MGMT_STATUS_REJECTED);
2490 }
2491
2492 hci_dev_lock(hdev);
2493
2494 val = !!cp->val;
2495 enabled = lmp_host_le_capable(hdev);
2496
2497 if (!hdev_is_powered(hdev) || val == enabled) {
2498 bool changed = false;
2499
2500 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2501 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2502 changed = true;
2503 }
2504
2505 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2506 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2507 changed = true;
2508 }
2509
2510 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2511 if (err < 0)
2512 goto unlock;
2513
2514 if (changed)
2515 err = new_settings(hdev, sk);
2516
2517 goto unlock;
2518 }
2519
2520 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2521 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2522 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2523 MGMT_STATUS_BUSY);
2524 goto unlock;
2525 }
2526
2527 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2528 if (!cmd)
2529 err = -ENOMEM;
2530 else
2531 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2532 set_le_complete);
2533
2534 if (err < 0) {
2535 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2536 MGMT_STATUS_FAILED);
2537
2538 if (cmd)
2539 mgmt_pending_remove(cmd);
2540 }
2541
2542 unlock:
2543 hci_dev_unlock(hdev);
2544 return err;
2545 }
2546
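/* Executes a raw HCI command passthrough for MGMT_OP_HCI_CMD_SYNC.
 * Note that cp->timeout is given in seconds (it is converted with
 * secs_to_jiffies()); a zero timeout falls back to HCI_CMD_TIMEOUT.
 */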
2547 static int send_hci_cmd_sync(struct hci_dev *hdev, void *data)
2548 {
2549 struct mgmt_pending_cmd *cmd = data;
2550 struct mgmt_cp_hci_cmd_sync *cp = cmd->param;
2551 struct sk_buff *skb;
2552
2553 skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cp->opcode),
2554 le16_to_cpu(cp->params_len), cp->params,
2555 cp->event, cp->timeout ?
2556 secs_to_jiffies(cp->timeout) :
2557 HCI_CMD_TIMEOUT);
2558 if (IS_ERR(skb)) {
2559 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2560 mgmt_status(PTR_ERR(skb)));
2561 goto done;
2562 }
2563
2564 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC, 0,
2565 skb->data, skb->len);
2566
2567 kfree_skb(skb);
2568
2569 done:
2570 mgmt_pending_free(cmd);
2571
2572 return 0;
2573 }
2574
2575 static int mgmt_hci_cmd_sync(struct sock *sk, struct hci_dev *hdev,
2576 void *data, u16 len)
2577 {
2578 struct mgmt_cp_hci_cmd_sync *cp = data;
2579 struct mgmt_pending_cmd *cmd;
2580 int err;
2581
2582 if (len != (offsetof(struct mgmt_cp_hci_cmd_sync, params) +
2583 le16_to_cpu(cp->params_len)))
2584 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2585 MGMT_STATUS_INVALID_PARAMS);
2586
2587 hci_dev_lock(hdev);
2588 cmd = mgmt_pending_new(sk, MGMT_OP_HCI_CMD_SYNC, hdev, data, len);
2589 if (!cmd)
2590 err = -ENOMEM;
2591 else
2592 err = hci_cmd_sync_queue(hdev, send_hci_cmd_sync, cmd, NULL);
2593
2594 if (err < 0) {
2595 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2596 MGMT_STATUS_FAILED);
2597
2598 if (cmd)
2599 mgmt_pending_free(cmd);
2600 }
2601
2602 hci_dev_unlock(hdev);
2603 return err;
2604 }
2605
2606 /* This is a helper function to test for pending mgmt commands that can
2607 * trigger CoD or EIR HCI commands. Only one such pending mgmt command
2608 * is allowed at a time, since otherwise we cannot easily track what the
2609 * current values are and will be, and based on that work out whether a
2610 * new HCI command needs to be sent and, if so, with what value.
2611 */
2612 static bool pending_eir_or_class(struct hci_dev *hdev)
2613 {
2614 struct mgmt_pending_cmd *cmd;
2615
2616 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2617 switch (cmd->opcode) {
2618 case MGMT_OP_ADD_UUID:
2619 case MGMT_OP_REMOVE_UUID:
2620 case MGMT_OP_SET_DEV_CLASS:
2621 case MGMT_OP_SET_POWERED:
2622 return true;
2623 }
2624 }
2625
2626 return false;
2627 }
2628
2629 static const u8 bluetooth_base_uuid[] = {
2630 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2631 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2632 };
2633
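/* The array above is the 128-bit Bluetooth Base UUID
 * (00000000-0000-1000-8000-00805F9B34FB) stored in little-endian byte
 * order. get_uuid_size() compares everything below the top 32 bits
 * against it: e.g. the 16-bit Heart Rate UUID 0x180D arrives as
 * 0000180D-0000-1000-8000-00805F9B34FB, matches the base, and has a
 * value <= 0xffff, so it is classified as a 16-bit UUID.
 */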
2634 static u8 get_uuid_size(const u8 *uuid)
2635 {
2636 u32 val;
2637
2638 if (memcmp(uuid, bluetooth_base_uuid, 12))
2639 return 128;
2640
2641 val = get_unaligned_le32(&uuid[12]);
2642 if (val > 0xffff)
2643 return 32;
2644
2645 return 16;
2646 }
2647
2648 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2649 {
2650 struct mgmt_pending_cmd *cmd = data;
2651
2652 bt_dev_dbg(hdev, "err %d", err);
2653
2654 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
2655 mgmt_status(err), hdev->dev_class, 3);
2656
2657 mgmt_pending_free(cmd);
2658 }
2659
2660 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2661 {
2662 int err;
2663
2664 err = hci_update_class_sync(hdev);
2665 if (err)
2666 return err;
2667
2668 return hci_update_eir_sync(hdev);
2669 }
2670
2671 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2672 {
2673 struct mgmt_cp_add_uuid *cp = data;
2674 struct mgmt_pending_cmd *cmd;
2675 struct bt_uuid *uuid;
2676 int err;
2677
2678 bt_dev_dbg(hdev, "sock %p", sk);
2679
2680 hci_dev_lock(hdev);
2681
2682 if (pending_eir_or_class(hdev)) {
2683 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2684 MGMT_STATUS_BUSY);
2685 goto failed;
2686 }
2687
2688 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2689 if (!uuid) {
2690 err = -ENOMEM;
2691 goto failed;
2692 }
2693
2694 memcpy(uuid->uuid, cp->uuid, 16);
2695 uuid->svc_hint = cp->svc_hint;
2696 uuid->size = get_uuid_size(cp->uuid);
2697
2698 list_add_tail(&uuid->list, &hdev->uuids);
2699
2700 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2701 if (!cmd) {
2702 err = -ENOMEM;
2703 goto failed;
2704 }
2705
2706 /* MGMT_OP_ADD_UUID doesn't require the adapter to be up/running,
2707 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2708 */
2709 err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
2710 mgmt_class_complete);
2711 if (err < 0) {
2712 mgmt_pending_free(cmd);
2713 goto failed;
2714 }
2715
2716 failed:
2717 hci_dev_unlock(hdev);
2718 return err;
2719 }
2720
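/* Sets HCI_SERVICE_CACHE so that class and EIR updates can be batched:
 * rather than updating on every UUID removal, the delayed service_cache
 * work flushes the accumulated changes after CACHE_TIMEOUT.
 */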
2721 static bool enable_service_cache(struct hci_dev *hdev)
2722 {
2723 if (!hdev_is_powered(hdev))
2724 return false;
2725
2726 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2727 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2728 CACHE_TIMEOUT);
2729 return true;
2730 }
2731
2732 return false;
2733 }
2734
2735 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2736 {
2737 int err;
2738
2739 err = hci_update_class_sync(hdev);
2740 if (err)
2741 return err;
2742
2743 return hci_update_eir_sync(hdev);
2744 }
2745
2746 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2747 u16 len)
2748 {
2749 struct mgmt_cp_remove_uuid *cp = data;
2750 struct mgmt_pending_cmd *cmd;
2751 struct bt_uuid *match, *tmp;
2752 static const u8 bt_uuid_any[] = {
2753 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2754 };
2755 int err, found;
2756
2757 bt_dev_dbg(hdev, "sock %p", sk);
2758
2759 hci_dev_lock(hdev);
2760
2761 if (pending_eir_or_class(hdev)) {
2762 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2763 MGMT_STATUS_BUSY);
2764 goto unlock;
2765 }
2766
2767 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2768 hci_uuids_clear(hdev);
2769
2770 if (enable_service_cache(hdev)) {
2771 err = mgmt_cmd_complete(sk, hdev->id,
2772 MGMT_OP_REMOVE_UUID,
2773 0, hdev->dev_class, 3);
2774 goto unlock;
2775 }
2776
2777 goto update_class;
2778 }
2779
2780 found = 0;
2781
2782 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2783 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2784 continue;
2785
2786 list_del(&match->list);
2787 kfree(match);
2788 found++;
2789 }
2790
2791 if (found == 0) {
2792 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2793 MGMT_STATUS_INVALID_PARAMS);
2794 goto unlock;
2795 }
2796
2797 update_class:
2798 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2799 if (!cmd) {
2800 err = -ENOMEM;
2801 goto unlock;
2802 }
2803
2804 /* MGMT_OP_REMOVE_UUID doesn't require the adapter to be up/running,
2805 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2806 */
2807 err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
2808 mgmt_class_complete);
2809 if (err < 0)
2810 mgmt_pending_free(cmd);
2811
2812 unlock:
2813 hci_dev_unlock(hdev);
2814 return err;
2815 }
2816
2817 static int set_class_sync(struct hci_dev *hdev, void *data)
2818 {
2819 int err = 0;
2820
2821 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2822 cancel_delayed_work_sync(&hdev->service_cache);
2823 err = hci_update_eir_sync(hdev);
2824 }
2825
2826 if (err)
2827 return err;
2828
2829 return hci_update_class_sync(hdev);
2830 }
2831
2832 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2833 u16 len)
2834 {
2835 struct mgmt_cp_set_dev_class *cp = data;
2836 struct mgmt_pending_cmd *cmd;
2837 int err;
2838
2839 bt_dev_dbg(hdev, "sock %p", sk);
2840
2841 if (!lmp_bredr_capable(hdev))
2842 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2843 MGMT_STATUS_NOT_SUPPORTED);
2844
2845 hci_dev_lock(hdev);
2846
2847 if (pending_eir_or_class(hdev)) {
2848 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2849 MGMT_STATUS_BUSY);
2850 goto unlock;
2851 }
2852
2853 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2854 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2855 MGMT_STATUS_INVALID_PARAMS);
2856 goto unlock;
2857 }
2858
2859 hdev->major_class = cp->major;
2860 hdev->minor_class = cp->minor;
2861
2862 if (!hdev_is_powered(hdev)) {
2863 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2864 hdev->dev_class, 3);
2865 goto unlock;
2866 }
2867
2868 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2869 if (!cmd) {
2870 err = -ENOMEM;
2871 goto unlock;
2872 }
2873
2874 /* MGMT_OP_SET_DEV_CLASS doesn't require the adapter to be up/running,
2875 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2876 */
2877 err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
2878 mgmt_class_complete);
2879 if (err < 0)
2880 mgmt_pending_free(cmd);
2881
2882 unlock:
2883 hci_dev_unlock(hdev);
2884 return err;
2885 }
2886
2887 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2888 u16 len)
2889 {
2890 struct mgmt_cp_load_link_keys *cp = data;
2891 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2892 sizeof(struct mgmt_link_key_info));
2893 u16 key_count, expected_len;
2894 bool changed;
2895 int i;
2896
2897 bt_dev_dbg(hdev, "sock %p", sk);
2898
2899 if (!lmp_bredr_capable(hdev))
2900 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2901 MGMT_STATUS_NOT_SUPPORTED);
2902
2903 key_count = __le16_to_cpu(cp->key_count);
2904 if (key_count > max_key_count) {
2905 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2906 key_count);
2907 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2908 MGMT_STATUS_INVALID_PARAMS);
2909 }
2910
2911 expected_len = struct_size(cp, keys, key_count);
2912 if (expected_len != len) {
2913 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2914 expected_len, len);
2915 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2916 MGMT_STATUS_INVALID_PARAMS);
2917 }
2918
2919 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2920 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2921 MGMT_STATUS_INVALID_PARAMS);
2922
2923 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2924 key_count);
2925
2926 hci_dev_lock(hdev);
2927
2928 hci_link_keys_clear(hdev);
2929
2930 if (cp->debug_keys)
2931 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2932 else
2933 changed = hci_dev_test_and_clear_flag(hdev,
2934 HCI_KEEP_DEBUG_KEYS);
2935
2936 if (changed)
2937 new_settings(hdev, NULL);
2938
2939 for (i = 0; i < key_count; i++) {
2940 struct mgmt_link_key_info *key = &cp->keys[i];
2941
2942 if (hci_is_blocked_key(hdev,
2943 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2944 key->val)) {
2945 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2946 &key->addr.bdaddr);
2947 continue;
2948 }
2949
2950 if (key->addr.type != BDADDR_BREDR) {
2951 bt_dev_warn(hdev,
2952 "Invalid link address type %u for %pMR",
2953 key->addr.type, &key->addr.bdaddr);
2954 continue;
2955 }
2956
2957 if (key->type > 0x08) {
2958 bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
2959 key->type, &key->addr.bdaddr);
2960 continue;
2961 }
2962
2963 /* Always ignore debug keys and require a new pairing if
2964 * the user wants to use them.
2965 */
2966 if (key->type == HCI_LK_DEBUG_COMBINATION)
2967 continue;
2968
2969 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2970 key->type, key->pin_len, NULL);
2971 }
2972
2973 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2974
2975 hci_dev_unlock(hdev);
2976
2977 return 0;
2978 }
2979
2980 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2981 u8 addr_type, struct sock *skip_sk)
2982 {
2983 struct mgmt_ev_device_unpaired ev;
2984
2985 bacpy(&ev.addr.bdaddr, bdaddr);
2986 ev.addr.type = addr_type;
2987
2988 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2989 skip_sk);
2990 }
2991
2992 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2993 {
2994 struct mgmt_pending_cmd *cmd = data;
2995 struct mgmt_cp_unpair_device *cp = cmd->param;
2996
2997 if (!err)
2998 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2999
3000 cmd->cmd_complete(cmd, err);
3001 mgmt_pending_free(cmd);
3002 }
3003
3004 static int unpair_device_sync(struct hci_dev *hdev, void *data)
3005 {
3006 struct mgmt_pending_cmd *cmd = data;
3007 struct mgmt_cp_unpair_device *cp = cmd->param;
3008 struct hci_conn *conn;
3009
3010 if (cp->addr.type == BDADDR_BREDR)
3011 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3012 &cp->addr.bdaddr);
3013 else
3014 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3015 le_addr_type(cp->addr.type));
3016
3017 if (!conn)
3018 return 0;
3019
3020 /* Disregard any possible error since hci_abort_conn() and friends
3021 * will clean up the connection regardless of the error.
3022 */
3023 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3024
3025 return 0;
3026 }
3027
3028 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3029 u16 len)
3030 {
3031 struct mgmt_cp_unpair_device *cp = data;
3032 struct mgmt_rp_unpair_device rp;
3033 struct hci_conn_params *params;
3034 struct mgmt_pending_cmd *cmd;
3035 struct hci_conn *conn;
3036 u8 addr_type;
3037 int err;
3038
3039 memset(&rp, 0, sizeof(rp));
3040 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3041 rp.addr.type = cp->addr.type;
3042
3043 if (!bdaddr_type_is_valid(cp->addr.type))
3044 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3045 MGMT_STATUS_INVALID_PARAMS,
3046 &rp, sizeof(rp));
3047
3048 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
3049 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3050 MGMT_STATUS_INVALID_PARAMS,
3051 &rp, sizeof(rp));
3052
3053 hci_dev_lock(hdev);
3054
3055 if (!hdev_is_powered(hdev)) {
3056 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3057 MGMT_STATUS_NOT_POWERED, &rp,
3058 sizeof(rp));
3059 goto unlock;
3060 }
3061
3062 if (cp->addr.type == BDADDR_BREDR) {
3063 /* If disconnection is requested, then look up the
3064 * connection. If the remote device is connected, the
3065 * connection will later be used to terminate the link.
3066 *
3067 * Explicitly setting it to NULL ensures that the link
3068 * is not terminated.
3069 */
3070 if (cp->disconnect)
3071 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3072 &cp->addr.bdaddr);
3073 else
3074 conn = NULL;
3075
3076 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3077 if (err < 0) {
3078 err = mgmt_cmd_complete(sk, hdev->id,
3079 MGMT_OP_UNPAIR_DEVICE,
3080 MGMT_STATUS_NOT_PAIRED, &rp,
3081 sizeof(rp));
3082 goto unlock;
3083 }
3084
3085 goto done;
3086 }
3087
3088 /* LE address type */
3089 addr_type = le_addr_type(cp->addr.type);
3090
3091 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3092 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3093 if (err < 0) {
3094 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3095 MGMT_STATUS_NOT_PAIRED, &rp,
3096 sizeof(rp));
3097 goto unlock;
3098 }
3099
3100 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3101 if (!conn) {
3102 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3103 goto done;
3104 }
3105
3106
3107 /* Defer cleaning up the connection parameters until the connection
3108 * closes, to give a chance of keeping them if a re-pairing happens.
3109 */
3110 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3111
3112 /* Disable auto-connection parameters if present */
3113 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3114 if (params) {
3115 if (params->explicit_connect)
3116 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3117 else
3118 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3119 }
3120
3121 /* If disconnection is not requested, then clear the connection
3122 * variable so that the link is not terminated.
3123 */
3124 if (!cp->disconnect)
3125 conn = NULL;
3126
3127 done:
3128 /* If the connection variable is set, then termination of the
3129 * link is requested.
3130 */
3131 if (!conn) {
3132 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3133 &rp, sizeof(rp));
3134 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3135 goto unlock;
3136 }
3137
3138 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3139 sizeof(*cp));
3140 if (!cmd) {
3141 err = -ENOMEM;
3142 goto unlock;
3143 }
3144
3145 cmd->cmd_complete = addr_cmd_complete;
3146
3147 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3148 unpair_device_complete);
3149 if (err < 0)
3150 mgmt_pending_free(cmd);
3151
3152 unlock:
3153 hci_dev_unlock(hdev);
3154 return err;
3155 }
3156
3157 static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
3158 {
3159 struct mgmt_pending_cmd *cmd = data;
3160
3161 cmd->cmd_complete(cmd, mgmt_status(err));
3162 mgmt_pending_free(cmd);
3163 }
3164
3165 static int disconnect_sync(struct hci_dev *hdev, void *data)
3166 {
3167 struct mgmt_pending_cmd *cmd = data;
3168 struct mgmt_cp_disconnect *cp = cmd->param;
3169 struct hci_conn *conn;
3170
3171 if (cp->addr.type == BDADDR_BREDR)
3172 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3173 &cp->addr.bdaddr);
3174 else
3175 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3176 le_addr_type(cp->addr.type));
3177
3178 if (!conn)
3179 return -ENOTCONN;
3180
3181 /* Disregard any possible error since hci_abort_conn() and friends
3182 * will clean up the connection regardless of the error.
3183 */
3184 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3185
3186 return 0;
3187 }
3188
3189 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3190 u16 len)
3191 {
3192 struct mgmt_cp_disconnect *cp = data;
3193 struct mgmt_rp_disconnect rp;
3194 struct mgmt_pending_cmd *cmd;
3195 int err;
3196
3197 bt_dev_dbg(hdev, "sock %p", sk);
3198
3199 memset(&rp, 0, sizeof(rp));
3200 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3201 rp.addr.type = cp->addr.type;
3202
3203 if (!bdaddr_type_is_valid(cp->addr.type))
3204 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3205 MGMT_STATUS_INVALID_PARAMS,
3206 &rp, sizeof(rp));
3207
3208 hci_dev_lock(hdev);
3209
3210 if (!test_bit(HCI_UP, &hdev->flags)) {
3211 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3212 MGMT_STATUS_NOT_POWERED, &rp,
3213 sizeof(rp));
3214 goto failed;
3215 }
3216
3217 cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3218 if (!cmd) {
3219 err = -ENOMEM;
3220 goto failed;
3221 }
3222
3223 cmd->cmd_complete = generic_cmd_complete;
3224
3225 err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
3226 disconnect_complete);
3227 if (err < 0)
3228 mgmt_pending_free(cmd);
3229
3230 failed:
3231 hci_dev_unlock(hdev);
3232 return err;
3233 }
3234
3235 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3236 {
3237 switch (link_type) {
3238 case CIS_LINK:
3239 case BIS_LINK:
3240 case LE_LINK:
3241 switch (addr_type) {
3242 case ADDR_LE_DEV_PUBLIC:
3243 return BDADDR_LE_PUBLIC;
3244
3245 default:
3246 /* Fallback to LE Random address type */
3247 return BDADDR_LE_RANDOM;
3248 }
3249
3250 default:
3251 /* Fallback to BR/EDR type */
3252 return BDADDR_BREDR;
3253 }
3254 }
3255
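/* Get Connections handler: a first pass counts the connections marked
 * HCI_CONN_MGMT_CONNECTED to size the reply, then a second pass fills
 * in the addresses while leaving SCO/eSCO links out of the final count.
 */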
3256 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3257 u16 data_len)
3258 {
3259 struct mgmt_rp_get_connections *rp;
3260 struct hci_conn *c;
3261 int err;
3262 u16 i;
3263
3264 bt_dev_dbg(hdev, "sock %p", sk);
3265
3266 hci_dev_lock(hdev);
3267
3268 if (!hdev_is_powered(hdev)) {
3269 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3270 MGMT_STATUS_NOT_POWERED);
3271 goto unlock;
3272 }
3273
3274 i = 0;
3275 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3276 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3277 i++;
3278 }
3279
3280 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3281 if (!rp) {
3282 err = -ENOMEM;
3283 goto unlock;
3284 }
3285
3286 i = 0;
3287 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3288 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3289 continue;
3290 bacpy(&rp->addr[i].bdaddr, &c->dst);
3291 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3292 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3293 continue;
3294 i++;
3295 }
3296
3297 rp->conn_count = cpu_to_le16(i);
3298
3299 /* Recalculate length in case of filtered SCO connections, etc */
3300 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3301 struct_size(rp, addr, i));
3302
3303 kfree(rp);
3304
3305 unlock:
3306 hci_dev_unlock(hdev);
3307 return err;
3308 }
3309
3310 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3311 struct mgmt_cp_pin_code_neg_reply *cp)
3312 {
3313 struct mgmt_pending_cmd *cmd;
3314 int err;
3315
3316 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3317 sizeof(*cp));
3318 if (!cmd)
3319 return -ENOMEM;
3320
3321 cmd->cmd_complete = addr_cmd_complete;
3322
3323 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3324 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3325 if (err < 0)
3326 mgmt_pending_remove(cmd);
3327
3328 return err;
3329 }
3330
3331 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3332 u16 len)
3333 {
3334 struct hci_conn *conn;
3335 struct mgmt_cp_pin_code_reply *cp = data;
3336 struct hci_cp_pin_code_reply reply;
3337 struct mgmt_pending_cmd *cmd;
3338 int err;
3339
3340 bt_dev_dbg(hdev, "sock %p", sk);
3341
3342 hci_dev_lock(hdev);
3343
3344 if (!hdev_is_powered(hdev)) {
3345 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3346 MGMT_STATUS_NOT_POWERED);
3347 goto failed;
3348 }
3349
3350 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3351 if (!conn) {
3352 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3353 MGMT_STATUS_NOT_CONNECTED);
3354 goto failed;
3355 }
3356
3357 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3358 struct mgmt_cp_pin_code_neg_reply ncp;
3359
3360 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3361
3362 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3363
3364 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3365 if (err >= 0)
3366 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3367 MGMT_STATUS_INVALID_PARAMS);
3368
3369 goto failed;
3370 }
3371
3372 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3373 if (!cmd) {
3374 err = -ENOMEM;
3375 goto failed;
3376 }
3377
3378 cmd->cmd_complete = addr_cmd_complete;
3379
3380 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3381 reply.pin_len = cp->pin_len;
3382 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3383
3384 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3385 if (err < 0)
3386 mgmt_pending_remove(cmd);
3387
3388 failed:
3389 hci_dev_unlock(hdev);
3390 return err;
3391 }
3392
3393 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3394 u16 len)
3395 {
3396 struct mgmt_cp_set_io_capability *cp = data;
3397
3398 bt_dev_dbg(hdev, "sock %p", sk);
3399
3400 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3401 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3402 MGMT_STATUS_INVALID_PARAMS);
3403
3404 hci_dev_lock(hdev);
3405
3406 hdev->io_capability = cp->io_capability;
3407
3408 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3409
3410 hci_dev_unlock(hdev);
3411
3412 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3413 NULL, 0);
3414 }
3415
3416 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3417 {
3418 struct hci_dev *hdev = conn->hdev;
3419 struct mgmt_pending_cmd *cmd;
3420
3421 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3422 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3423 continue;
3424
3425 if (cmd->user_data != conn)
3426 continue;
3427
3428 return cmd;
3429 }
3430
3431 return NULL;
3432 }
3433
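/* Finalizes a Pair Device request: sends the command reply, detaches
 * the pairing callbacks from the connection and drops the references
 * taken while the pairing was in progress.
 */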
3434 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3435 {
3436 struct mgmt_rp_pair_device rp;
3437 struct hci_conn *conn = cmd->user_data;
3438 int err;
3439
3440 bacpy(&rp.addr.bdaddr, &conn->dst);
3441 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3442
3443 err = mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_PAIR_DEVICE,
3444 status, &rp, sizeof(rp));
3445
3446 /* So we don't get further callbacks for this connection */
3447 conn->connect_cfm_cb = NULL;
3448 conn->security_cfm_cb = NULL;
3449 conn->disconn_cfm_cb = NULL;
3450
3451 hci_conn_drop(conn);
3452
3453 /* The device is paired so there is no need to remove
3454 * its connection parameters anymore.
3455 */
3456 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3457
3458 hci_conn_put(conn);
3459
3460 return err;
3461 }
3462
3463 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3464 {
3465 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3466 struct mgmt_pending_cmd *cmd;
3467
3468 cmd = find_pairing(conn);
3469 if (cmd) {
3470 cmd->cmd_complete(cmd, status);
3471 mgmt_pending_remove(cmd);
3472 }
3473 }
3474
3475 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3476 {
3477 struct mgmt_pending_cmd *cmd;
3478
3479 BT_DBG("status %u", status);
3480
3481 cmd = find_pairing(conn);
3482 if (!cmd) {
3483 BT_DBG("Unable to find a pending command");
3484 return;
3485 }
3486
3487 cmd->cmd_complete(cmd, mgmt_status(status));
3488 mgmt_pending_remove(cmd);
3489 }
3490
3491 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3492 {
3493 struct mgmt_pending_cmd *cmd;
3494
3495 BT_DBG("status %u", status);
3496
3497 if (!status)
3498 return;
3499
3500 cmd = find_pairing(conn);
3501 if (!cmd) {
3502 BT_DBG("Unable to find a pending command");
3503 return;
3504 }
3505
3506 cmd->cmd_complete(cmd, mgmt_status(status));
3507 mgmt_pending_remove(cmd);
3508 }
3509
3510 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3511 u16 len)
3512 {
3513 struct mgmt_cp_pair_device *cp = data;
3514 struct mgmt_rp_pair_device rp;
3515 struct mgmt_pending_cmd *cmd;
3516 u8 sec_level, auth_type;
3517 struct hci_conn *conn;
3518 int err;
3519
3520 bt_dev_dbg(hdev, "sock %p", sk);
3521
3522 memset(&rp, 0, sizeof(rp));
3523 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3524 rp.addr.type = cp->addr.type;
3525
3526 if (!bdaddr_type_is_valid(cp->addr.type))
3527 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3528 MGMT_STATUS_INVALID_PARAMS,
3529 &rp, sizeof(rp));
3530
3531 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3532 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3533 MGMT_STATUS_INVALID_PARAMS,
3534 &rp, sizeof(rp));
3535
3536 hci_dev_lock(hdev);
3537
3538 if (!hdev_is_powered(hdev)) {
3539 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3540 MGMT_STATUS_NOT_POWERED, &rp,
3541 sizeof(rp));
3542 goto unlock;
3543 }
3544
3545 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3546 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3547 MGMT_STATUS_ALREADY_PAIRED, &rp,
3548 sizeof(rp));
3549 goto unlock;
3550 }
3551
3552 sec_level = BT_SECURITY_MEDIUM;
3553 auth_type = HCI_AT_DEDICATED_BONDING;
3554
3555 if (cp->addr.type == BDADDR_BREDR) {
3556 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3557 auth_type, CONN_REASON_PAIR_DEVICE,
3558 HCI_ACL_CONN_TIMEOUT);
3559 } else {
3560 u8 addr_type = le_addr_type(cp->addr.type);
3561 struct hci_conn_params *p;
3562
3563 /* When pairing a new device, the device is expected to be
3564 * remembered for future connections. Adding the connection
3565 * parameter information ahead of time allows tracking of the
3566 * peripheral's preferred values and will speed up any further
3567 * connection establishment.
3568 *
3569 * If connection parameters already exist, then they will be
3570 * kept and this function does nothing.
3571 */
3572 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3573 if (!p) {
3574 err = -EIO;
3575 goto unlock;
3576 }
3577
3578 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3579 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3580
3581 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3582 sec_level, HCI_LE_CONN_TIMEOUT,
3583 CONN_REASON_PAIR_DEVICE);
3584 }
3585
3586 if (IS_ERR(conn)) {
3587 int status;
3588
3589 if (PTR_ERR(conn) == -EBUSY)
3590 status = MGMT_STATUS_BUSY;
3591 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3592 status = MGMT_STATUS_NOT_SUPPORTED;
3593 else if (PTR_ERR(conn) == -ECONNREFUSED)
3594 status = MGMT_STATUS_REJECTED;
3595 else
3596 status = MGMT_STATUS_CONNECT_FAILED;
3597
3598 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3599 status, &rp, sizeof(rp));
3600 goto unlock;
3601 }
3602
3603 if (conn->connect_cfm_cb) {
3604 hci_conn_drop(conn);
3605 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3606 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3607 goto unlock;
3608 }
3609
3610 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3611 if (!cmd) {
3612 err = -ENOMEM;
3613 hci_conn_drop(conn);
3614 goto unlock;
3615 }
3616
3617 cmd->cmd_complete = pairing_complete;
3618
3619 /* For LE, just connecting isn't proof that the pairing finished */
3620 if (cp->addr.type == BDADDR_BREDR) {
3621 conn->connect_cfm_cb = pairing_complete_cb;
3622 conn->security_cfm_cb = pairing_complete_cb;
3623 conn->disconn_cfm_cb = pairing_complete_cb;
3624 } else {
3625 conn->connect_cfm_cb = le_pairing_complete_cb;
3626 conn->security_cfm_cb = le_pairing_complete_cb;
3627 conn->disconn_cfm_cb = le_pairing_complete_cb;
3628 }
3629
3630 conn->io_capability = cp->io_cap;
3631 cmd->user_data = hci_conn_get(conn);
3632
3633 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3634 hci_conn_security(conn, sec_level, auth_type, true)) {
3635 cmd->cmd_complete(cmd, 0);
3636 mgmt_pending_remove(cmd);
3637 }
3638
3639 err = 0;
3640
3641 unlock:
3642 hci_dev_unlock(hdev);
3643 return err;
3644 }
3645
3646 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3647 u16 len)
3648 {
3649 struct mgmt_addr_info *addr = data;
3650 struct mgmt_pending_cmd *cmd;
3651 struct hci_conn *conn;
3652 int err;
3653
3654 bt_dev_dbg(hdev, "sock %p", sk);
3655
3656 hci_dev_lock(hdev);
3657
3658 if (!hdev_is_powered(hdev)) {
3659 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3660 MGMT_STATUS_NOT_POWERED);
3661 goto unlock;
3662 }
3663
3664 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3665 if (!cmd) {
3666 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3667 MGMT_STATUS_INVALID_PARAMS);
3668 goto unlock;
3669 }
3670
3671 conn = cmd->user_data;
3672
3673 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3674 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3675 MGMT_STATUS_INVALID_PARAMS);
3676 goto unlock;
3677 }
3678
3679 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3680 mgmt_pending_remove(cmd);
3681
3682 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3683 addr, sizeof(*addr));
3684
3685 /* Since the user doesn't want to proceed with the connection, abort
3686 * any ongoing pairing and then terminate the link if it was created
3687 * because of the Pair Device action.
3688 */
3689 if (addr->type == BDADDR_BREDR)
3690 hci_remove_link_key(hdev, &addr->bdaddr);
3691 else
3692 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3693 le_addr_type(addr->type));
3694
3695 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3696 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3697
3698 unlock:
3699 hci_dev_unlock(hdev);
3700 return err;
3701 }
3702
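/* Common handler for the user confirmation and passkey (neg) replies:
 * LE pairing responses are routed through SMP, while BR/EDR responses
 * are sent to the controller as the given HCI command.
 */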
3703 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3704 struct mgmt_addr_info *addr, u16 mgmt_op,
3705 u16 hci_op, __le32 passkey)
3706 {
3707 struct mgmt_pending_cmd *cmd;
3708 struct hci_conn *conn;
3709 int err;
3710
3711 hci_dev_lock(hdev);
3712
3713 if (!hdev_is_powered(hdev)) {
3714 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3715 MGMT_STATUS_NOT_POWERED, addr,
3716 sizeof(*addr));
3717 goto done;
3718 }
3719
3720 if (addr->type == BDADDR_BREDR)
3721 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3722 else
3723 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3724 le_addr_type(addr->type));
3725
3726 if (!conn) {
3727 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3728 MGMT_STATUS_NOT_CONNECTED, addr,
3729 sizeof(*addr));
3730 goto done;
3731 }
3732
3733 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3734 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3735 if (!err)
3736 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3737 MGMT_STATUS_SUCCESS, addr,
3738 sizeof(*addr));
3739 else
3740 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3741 MGMT_STATUS_FAILED, addr,
3742 sizeof(*addr));
3743
3744 goto done;
3745 }
3746
3747 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3748 if (!cmd) {
3749 err = -ENOMEM;
3750 goto done;
3751 }
3752
3753 cmd->cmd_complete = addr_cmd_complete;
3754
3755 /* Continue with pairing via HCI */
3756 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3757 struct hci_cp_user_passkey_reply cp;
3758
3759 bacpy(&cp.bdaddr, &addr->bdaddr);
3760 cp.passkey = passkey;
3761 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3762 } else
3763 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3764 &addr->bdaddr);
3765
3766 if (err < 0)
3767 mgmt_pending_remove(cmd);
3768
3769 done:
3770 hci_dev_unlock(hdev);
3771 return err;
3772 }
3773
3774 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3775 void *data, u16 len)
3776 {
3777 struct mgmt_cp_pin_code_neg_reply *cp = data;
3778
3779 bt_dev_dbg(hdev, "sock %p", sk);
3780
3781 return user_pairing_resp(sk, hdev, &cp->addr,
3782 MGMT_OP_PIN_CODE_NEG_REPLY,
3783 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3784 }
3785
3786 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3787 u16 len)
3788 {
3789 struct mgmt_cp_user_confirm_reply *cp = data;
3790
3791 bt_dev_dbg(hdev, "sock %p", sk);
3792
3793 if (len != sizeof(*cp))
3794 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3795 MGMT_STATUS_INVALID_PARAMS);
3796
3797 return user_pairing_resp(sk, hdev, &cp->addr,
3798 MGMT_OP_USER_CONFIRM_REPLY,
3799 HCI_OP_USER_CONFIRM_REPLY, 0);
3800 }
3801
3802 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3803 void *data, u16 len)
3804 {
3805 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3806
3807 bt_dev_dbg(hdev, "sock %p", sk);
3808
3809 return user_pairing_resp(sk, hdev, &cp->addr,
3810 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3811 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3812 }
3813
3814 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3815 u16 len)
3816 {
3817 struct mgmt_cp_user_passkey_reply *cp = data;
3818
3819 bt_dev_dbg(hdev, "sock %p", sk);
3820
3821 return user_pairing_resp(sk, hdev, &cp->addr,
3822 MGMT_OP_USER_PASSKEY_REPLY,
3823 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3824 }
3825
3826 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3827 void *data, u16 len)
3828 {
3829 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3830
3831 bt_dev_dbg(hdev, "sock %p", sk);
3832
3833 return user_pairing_resp(sk, hdev, &cp->addr,
3834 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3835 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3836 }
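/* Illustrative sketch of the userspace side (socket setup and 'peer'
 * are assumed): all of the reply handlers above consume the same wire
 * format, a mgmt_hdr followed by struct mgmt_addr_info (plus a __le32
 * passkey for MGMT_OP_USER_PASSKEY_REPLY):
 *
 *	struct {
 *		struct mgmt_hdr hdr;
 *		struct mgmt_cp_user_confirm_reply cp;
 *	} __packed buf;
 *
 *	buf.hdr.opcode = cpu_to_le16(MGMT_OP_USER_CONFIRM_REPLY);
 *	buf.hdr.index  = cpu_to_le16(0);	// hci0
 *	buf.hdr.len    = cpu_to_le16(sizeof(buf.cp));
 *	bacpy(&buf.cp.addr.bdaddr, &peer);
 *	buf.cp.addr.type = BDADDR_BREDR;
 *	send(mgmt_fd, &buf, sizeof(buf), 0);	// HCI_CHANNEL_CONTROL socket
 */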
3837
3838 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3839 {
3840 struct adv_info *adv_instance;
3841
3842 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3843 if (!adv_instance)
3844 return 0;
3845
3846 /* stop if current instance doesn't need to be changed */
3847 if (!(adv_instance->flags & flags))
3848 return 0;
3849
3850 cancel_adv_timeout(hdev);
3851
3852 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3853 if (!adv_instance)
3854 return 0;
3855
3856 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3857
3858 return 0;
3859 }
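/* Example: with cur_adv_instance = 1 carrying MGMT_ADV_FLAG_LOCAL_NAME
 * and a second instance registered, expiring cancels the rotation timer
 * and schedules instance 2 immediately; instance 1 is rebuilt with the
 * updated data the next time it comes around, so stale advertising data
 * does not stay on air.
 */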
3860
3861 static int name_changed_sync(struct hci_dev *hdev, void *data)
3862 {
3863 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3864 }
3865
3866 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3867 {
3868 struct mgmt_pending_cmd *cmd = data;
3869 struct mgmt_cp_set_local_name *cp = cmd->param;
3870 u8 status = mgmt_status(err);
3871
3872 bt_dev_dbg(hdev, "err %d", err);
3873
3874 if (err == -ECANCELED ||
3875 cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3876 return;
3877
3878 if (status) {
3879 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3880 status);
3881 } else {
3882 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3883 cp, sizeof(*cp));
3884
3885 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3886 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3887 }
3888
3889 mgmt_pending_remove(cmd);
3890 }
3891
3892 static int set_name_sync(struct hci_dev *hdev, void *data)
3893 {
3894 if (lmp_bredr_capable(hdev)) {
3895 hci_update_name_sync(hdev);
3896 hci_update_eir_sync(hdev);
3897 }
3898
3899 	/* The name is stored in the scan response data, so there is
3900 	 * no need to update the advertising data here.
3901 */
3902 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3903 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3904
3905 return 0;
3906 }
3907
3908 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3909 u16 len)
3910 {
3911 struct mgmt_cp_set_local_name *cp = data;
3912 struct mgmt_pending_cmd *cmd;
3913 int err;
3914
3915 bt_dev_dbg(hdev, "sock %p", sk);
3916
3917 hci_dev_lock(hdev);
3918
3919 	/* If the old values are the same as the new ones, just return a
3920 * direct command complete event.
3921 */
3922 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3923 !memcmp(hdev->short_name, cp->short_name,
3924 sizeof(hdev->short_name))) {
3925 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3926 data, len);
3927 goto failed;
3928 }
3929
3930 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3931
3932 if (!hdev_is_powered(hdev)) {
3933 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3934
3935 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3936 data, len);
3937 if (err < 0)
3938 goto failed;
3939
3940 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3941 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3942 ext_info_changed(hdev, sk);
3943
3944 goto failed;
3945 }
3946
3947 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3948 if (!cmd)
3949 err = -ENOMEM;
3950 else
3951 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3952 set_name_complete);
3953
3954 if (err < 0) {
3955 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3956 MGMT_STATUS_FAILED);
3957
3958 if (cmd)
3959 mgmt_pending_remove(cmd);
3960
3961 goto failed;
3962 }
3963
3964 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3965
3966 failed:
3967 hci_dev_unlock(hdev);
3968 return err;
3969 }
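/* Minimal request sketch (names assumed): both name buffers are
 * fixed-size and NUL-padded, which is why the fast path above can
 * memcmp() the full fields instead of string-comparing:
 *
 *	struct mgmt_cp_set_local_name cp;
 *
 *	memset(&cp, 0, sizeof(cp));
 *	strscpy(cp.name, "kernel-test", sizeof(cp.name));
 *	strscpy(cp.short_name, "ktest", sizeof(cp.short_name));
 */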
3970
3971 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3972 {
3973 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3974 }
3975
3976 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3977 u16 len)
3978 {
3979 struct mgmt_cp_set_appearance *cp = data;
3980 u16 appearance;
3981 int err;
3982
3983 bt_dev_dbg(hdev, "sock %p", sk);
3984
3985 if (!lmp_le_capable(hdev))
3986 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3987 MGMT_STATUS_NOT_SUPPORTED);
3988
3989 appearance = le16_to_cpu(cp->appearance);
3990
3991 hci_dev_lock(hdev);
3992
3993 if (hdev->appearance != appearance) {
3994 hdev->appearance = appearance;
3995
3996 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3997 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3998 NULL);
3999
4000 ext_info_changed(hdev, sk);
4001 }
4002
4003 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
4004 0);
4005
4006 hci_dev_unlock(hdev);
4007
4008 return err;
4009 }
4010
4011 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4012 void *data, u16 len)
4013 {
4014 struct mgmt_rp_get_phy_configuration rp;
4015
4016 bt_dev_dbg(hdev, "sock %p", sk);
4017
4018 hci_dev_lock(hdev);
4019
4020 memset(&rp, 0, sizeof(rp));
4021
4022 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
4023 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
4024 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
4025
4026 hci_dev_unlock(hdev);
4027
4028 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
4029 &rp, sizeof(rp));
4030 }
4031
4032 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
4033 {
4034 struct mgmt_ev_phy_configuration_changed ev;
4035
4036 memset(&ev, 0, sizeof(ev));
4037
4038 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
4039
4040 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
4041 sizeof(ev), skip);
4042 }
4043
4044 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
4045 {
4046 struct mgmt_pending_cmd *cmd = data;
4047 struct sk_buff *skb = cmd->skb;
4048 u8 status = mgmt_status(err);
4049
4050 if (err == -ECANCELED ||
4051 cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
4052 return;
4053
4054 if (!status) {
4055 if (!skb)
4056 status = MGMT_STATUS_FAILED;
4057 else if (IS_ERR(skb))
4058 status = mgmt_status(PTR_ERR(skb));
4059 else
4060 status = mgmt_status(skb->data[0]);
4061 }
4062
4063 bt_dev_dbg(hdev, "status %d", status);
4064
4065 if (status) {
4066 mgmt_cmd_status(cmd->sk, hdev->id,
4067 MGMT_OP_SET_PHY_CONFIGURATION, status);
4068 } else {
4069 mgmt_cmd_complete(cmd->sk, hdev->id,
4070 MGMT_OP_SET_PHY_CONFIGURATION, 0,
4071 NULL, 0);
4072
4073 mgmt_phy_configuration_changed(hdev, cmd->sk);
4074 }
4075
4076 if (skb && !IS_ERR(skb))
4077 kfree_skb(skb);
4078
4079 mgmt_pending_remove(cmd);
4080 }
4081
4082 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4083 {
4084 struct mgmt_pending_cmd *cmd = data;
4085 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4086 struct hci_cp_le_set_default_phy cp_phy;
4087 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4088
4089 memset(&cp_phy, 0, sizeof(cp_phy));
4090
4091 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4092 cp_phy.all_phys |= 0x01;
4093
4094 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4095 cp_phy.all_phys |= 0x02;
4096
4097 if (selected_phys & MGMT_PHY_LE_1M_TX)
4098 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4099
4100 if (selected_phys & MGMT_PHY_LE_2M_TX)
4101 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4102
4103 if (selected_phys & MGMT_PHY_LE_CODED_TX)
4104 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4105
4106 if (selected_phys & MGMT_PHY_LE_1M_RX)
4107 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4108
4109 if (selected_phys & MGMT_PHY_LE_2M_RX)
4110 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4111
4112 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4113 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4114
4115 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4116 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4117
4118 return 0;
4119 }
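/* Worked example: selected_phys = MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_1M_RX |
 * MGMT_PHY_LE_2M_TX maps to all_phys = 0x00 (host specifies both
 * directions), tx_phys = HCI_LE_SET_PHY_1M | HCI_LE_SET_PHY_2M and
 * rx_phys = HCI_LE_SET_PHY_1M. Had no TX bit been selected, all_phys
 * would carry 0x01, telling the controller the host has no TX
 * preference.
 */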
4120
4121 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4122 void *data, u16 len)
4123 {
4124 struct mgmt_cp_set_phy_configuration *cp = data;
4125 struct mgmt_pending_cmd *cmd;
4126 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4127 u16 pkt_type = (HCI_DH1 | HCI_DM1);
4128 bool changed = false;
4129 int err;
4130
4131 bt_dev_dbg(hdev, "sock %p", sk);
4132
4133 configurable_phys = get_configurable_phys(hdev);
4134 supported_phys = get_supported_phys(hdev);
4135 selected_phys = __le32_to_cpu(cp->selected_phys);
4136
4137 if (selected_phys & ~supported_phys)
4138 return mgmt_cmd_status(sk, hdev->id,
4139 MGMT_OP_SET_PHY_CONFIGURATION,
4140 MGMT_STATUS_INVALID_PARAMS);
4141
4142 unconfigure_phys = supported_phys & ~configurable_phys;
4143
4144 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4145 return mgmt_cmd_status(sk, hdev->id,
4146 MGMT_OP_SET_PHY_CONFIGURATION,
4147 MGMT_STATUS_INVALID_PARAMS);
4148
4149 if (selected_phys == get_selected_phys(hdev))
4150 return mgmt_cmd_complete(sk, hdev->id,
4151 MGMT_OP_SET_PHY_CONFIGURATION,
4152 0, NULL, 0);
4153
4154 hci_dev_lock(hdev);
4155
4156 if (!hdev_is_powered(hdev)) {
4157 err = mgmt_cmd_status(sk, hdev->id,
4158 MGMT_OP_SET_PHY_CONFIGURATION,
4159 MGMT_STATUS_REJECTED);
4160 goto unlock;
4161 }
4162
4163 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4164 err = mgmt_cmd_status(sk, hdev->id,
4165 MGMT_OP_SET_PHY_CONFIGURATION,
4166 MGMT_STATUS_BUSY);
4167 goto unlock;
4168 }
4169
4170 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4171 pkt_type |= (HCI_DH3 | HCI_DM3);
4172 else
4173 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4174
4175 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4176 pkt_type |= (HCI_DH5 | HCI_DM5);
4177 else
4178 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4179
4180 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4181 pkt_type &= ~HCI_2DH1;
4182 else
4183 pkt_type |= HCI_2DH1;
4184
4185 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4186 pkt_type &= ~HCI_2DH3;
4187 else
4188 pkt_type |= HCI_2DH3;
4189
4190 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4191 pkt_type &= ~HCI_2DH5;
4192 else
4193 pkt_type |= HCI_2DH5;
4194
4195 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4196 pkt_type &= ~HCI_3DH1;
4197 else
4198 pkt_type |= HCI_3DH1;
4199
4200 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4201 pkt_type &= ~HCI_3DH3;
4202 else
4203 pkt_type |= HCI_3DH3;
4204
4205 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4206 pkt_type &= ~HCI_3DH5;
4207 else
4208 pkt_type |= HCI_3DH5;
4209
4210 if (pkt_type != hdev->pkt_type) {
4211 hdev->pkt_type = pkt_type;
4212 changed = true;
4213 }
4214
4215 if ((selected_phys & MGMT_PHY_LE_MASK) ==
4216 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4217 if (changed)
4218 mgmt_phy_configuration_changed(hdev, sk);
4219
4220 err = mgmt_cmd_complete(sk, hdev->id,
4221 MGMT_OP_SET_PHY_CONFIGURATION,
4222 0, NULL, 0);
4223
4224 goto unlock;
4225 }
4226
4227 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4228 len);
4229 if (!cmd)
4230 err = -ENOMEM;
4231 else
4232 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4233 set_default_phy_complete);
4234
4235 if (err < 0) {
4236 err = mgmt_cmd_status(sk, hdev->id,
4237 MGMT_OP_SET_PHY_CONFIGURATION,
4238 MGMT_STATUS_FAILED);
4239
4240 if (cmd)
4241 mgmt_pending_remove(cmd);
4242 }
4243
4244 unlock:
4245 hci_dev_unlock(hdev);
4246
4247 return err;
4248 }
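/* Note the inverted sense of the EDR bits above: in the packet type
 * bitmap the HCI_2DHx/HCI_3DHx bits mark packet types that shall NOT
 * be used, so selecting MGMT_PHY_EDR_2M_1SLOT clears HCI_2DH1 while
 * deselecting it sets the bit. Selecting only the mandatory
 * MGMT_PHY_BR_1M_1SLOT therefore yields pkt_type = HCI_DH1 | HCI_DM1
 * with all six 2DH/3DH "disable" bits set, i.e. EDR fully off.
 */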
4249
4250 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4251 u16 len)
4252 {
4253 int err = MGMT_STATUS_SUCCESS;
4254 struct mgmt_cp_set_blocked_keys *keys = data;
4255 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4256 sizeof(struct mgmt_blocked_key_info));
4257 u16 key_count, expected_len;
4258 int i;
4259
4260 bt_dev_dbg(hdev, "sock %p", sk);
4261
4262 key_count = __le16_to_cpu(keys->key_count);
4263 if (key_count > max_key_count) {
4264 bt_dev_err(hdev, "too big key_count value %u", key_count);
4265 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4266 MGMT_STATUS_INVALID_PARAMS);
4267 }
4268
4269 expected_len = struct_size(keys, keys, key_count);
4270 if (expected_len != len) {
4271 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4272 expected_len, len);
4273 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4274 MGMT_STATUS_INVALID_PARAMS);
4275 }
4276
4277 hci_dev_lock(hdev);
4278
4279 hci_blocked_keys_clear(hdev);
4280
4281 for (i = 0; i < key_count; ++i) {
4282 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4283
4284 if (!b) {
4285 err = MGMT_STATUS_NO_RESOURCES;
4286 break;
4287 }
4288
4289 b->type = keys->keys[i].type;
4290 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4291 list_add_rcu(&b->list, &hdev->blocked_keys);
4292 }
4293 hci_dev_unlock(hdev);
4294
4295 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4296 err, NULL, 0);
4297 }
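/* Example: a request blocking two keys must have key_count == 2 and
 * len == sizeof(*keys) + 2 * sizeof(struct mgmt_blocked_key_info);
 * struct_size() computes exactly that with overflow checking, so both
 * short and oversized buffers fail with INVALID_PARAMS above.
 */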
4298
4299 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4300 void *data, u16 len)
4301 {
4302 struct mgmt_mode *cp = data;
4303 int err;
4304 bool changed = false;
4305
4306 bt_dev_dbg(hdev, "sock %p", sk);
4307
4308 if (!hci_test_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED))
4309 return mgmt_cmd_status(sk, hdev->id,
4310 MGMT_OP_SET_WIDEBAND_SPEECH,
4311 MGMT_STATUS_NOT_SUPPORTED);
4312
4313 if (cp->val != 0x00 && cp->val != 0x01)
4314 return mgmt_cmd_status(sk, hdev->id,
4315 MGMT_OP_SET_WIDEBAND_SPEECH,
4316 MGMT_STATUS_INVALID_PARAMS);
4317
4318 hci_dev_lock(hdev);
4319
4320 if (hdev_is_powered(hdev) &&
4321 !!cp->val != hci_dev_test_flag(hdev,
4322 HCI_WIDEBAND_SPEECH_ENABLED)) {
4323 err = mgmt_cmd_status(sk, hdev->id,
4324 MGMT_OP_SET_WIDEBAND_SPEECH,
4325 MGMT_STATUS_REJECTED);
4326 goto unlock;
4327 }
4328
4329 if (cp->val)
4330 changed = !hci_dev_test_and_set_flag(hdev,
4331 HCI_WIDEBAND_SPEECH_ENABLED);
4332 else
4333 changed = hci_dev_test_and_clear_flag(hdev,
4334 HCI_WIDEBAND_SPEECH_ENABLED);
4335
4336 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4337 if (err < 0)
4338 goto unlock;
4339
4340 if (changed)
4341 err = new_settings(hdev, sk);
4342
4343 unlock:
4344 hci_dev_unlock(hdev);
4345 return err;
4346 }
4347
4348 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4349 void *data, u16 data_len)
4350 {
4351 char buf[20];
4352 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4353 u16 cap_len = 0;
4354 u8 flags = 0;
4355 u8 tx_power_range[2];
4356
4357 bt_dev_dbg(hdev, "sock %p", sk);
4358
4359 memset(&buf, 0, sizeof(buf));
4360
4361 hci_dev_lock(hdev);
4362
4363 /* When the Read Simple Pairing Options command is supported, then
4364 	 * remote public key validation is supported.
4365 *
4366 * Alternatively, when Microsoft extensions are available, they can
4367 * indicate support for public key validation as well.
4368 */
4369 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4370 flags |= 0x01; /* Remote public key validation (BR/EDR) */
4371
4372 flags |= 0x02; /* Remote public key validation (LE) */
4373
4374 /* When the Read Encryption Key Size command is supported, then the
4375 * encryption key size is enforced.
4376 */
4377 if (hdev->commands[20] & 0x10)
4378 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
4379
4380 flags |= 0x08; /* Encryption key size enforcement (LE) */
4381
4382 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4383 &flags, 1);
4384
4385 /* When the Read Simple Pairing Options command is supported, then
4386 	 * the maximum encryption key size is also provided.
4387 */
4388 if (hdev->commands[41] & 0x08)
4389 cap_len = eir_append_le16(rp->cap, cap_len,
4390 MGMT_CAP_MAX_ENC_KEY_SIZE,
4391 hdev->max_enc_key_size);
4392
4393 cap_len = eir_append_le16(rp->cap, cap_len,
4394 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4395 SMP_MAX_ENC_KEY_SIZE);
4396
4397 /* Append the min/max LE tx power parameters if we were able to fetch
4398 	 * them from the controller.
4399 */
4400 if (hdev->commands[38] & 0x80) {
4401 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4402 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4403 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4404 tx_power_range, 2);
4405 }
4406
4407 rp->cap_len = cpu_to_le16(cap_len);
4408
4409 hci_dev_unlock(hdev);
4410
4411 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4412 rp, sizeof(*rp) + cap_len);
4413 }
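/* The capability list uses EIR-style encoding: each entry is
 * { u8 len; u8 type; u8 data[] } with len covering type plus data.
 * Sketch of the security-flags entry for a controller with all four
 * enforcement bits set (type value per mgmt.h):
 *
 *	0x02 0x01 0x0f		len 2, MGMT_CAP_SEC_FLAGS, flags 0x0f
 */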
4414
4415 #ifdef CONFIG_BT_FEATURE_DEBUG
4416 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4417 static const u8 debug_uuid[16] = {
4418 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4419 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4420 };
4421 #endif
4422
4423 /* 330859bc-7506-492d-9370-9a6f0614037f */
4424 static const u8 quality_report_uuid[16] = {
4425 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4426 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4427 };
4428
4429 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4430 static const u8 offload_codecs_uuid[16] = {
4431 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4432 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4433 };
4434
4435 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4436 static const u8 le_simultaneous_roles_uuid[16] = {
4437 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4438 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4439 };
4440
4441 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4442 static const u8 iso_socket_uuid[16] = {
4443 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4444 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4445 };
4446
4447 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4448 static const u8 mgmt_mesh_uuid[16] = {
4449 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4450 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4451 };
4452
4453 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4454 void *data, u16 data_len)
4455 {
4456 struct mgmt_rp_read_exp_features_info *rp;
4457 size_t len;
4458 u16 idx = 0;
4459 u32 flags;
4460 int status;
4461
4462 bt_dev_dbg(hdev, "sock %p", sk);
4463
4464 /* Enough space for 7 features */
4465 len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4466 rp = kzalloc(len, GFP_KERNEL);
4467 if (!rp)
4468 return -ENOMEM;
4469
4470 #ifdef CONFIG_BT_FEATURE_DEBUG
4471 if (!hdev) {
4472 flags = bt_dbg_get() ? BIT(0) : 0;
4473
4474 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4475 rp->features[idx].flags = cpu_to_le32(flags);
4476 idx++;
4477 }
4478 #endif
4479
4480 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4481 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4482 flags = BIT(0);
4483 else
4484 flags = 0;
4485
4486 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4487 rp->features[idx].flags = cpu_to_le32(flags);
4488 idx++;
4489 }
4490
4491 if (hdev && (aosp_has_quality_report(hdev) ||
4492 hdev->set_quality_report)) {
4493 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4494 flags = BIT(0);
4495 else
4496 flags = 0;
4497
4498 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4499 rp->features[idx].flags = cpu_to_le32(flags);
4500 idx++;
4501 }
4502
4503 if (hdev && hdev->get_data_path_id) {
4504 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4505 flags = BIT(0);
4506 else
4507 flags = 0;
4508
4509 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4510 rp->features[idx].flags = cpu_to_le32(flags);
4511 idx++;
4512 }
4513
4514 if (IS_ENABLED(CONFIG_BT_LE)) {
4515 flags = iso_enabled() ? BIT(0) : 0;
4516 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4517 rp->features[idx].flags = cpu_to_le32(flags);
4518 idx++;
4519 }
4520
4521 if (hdev && lmp_le_capable(hdev)) {
4522 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4523 flags = BIT(0);
4524 else
4525 flags = 0;
4526
4527 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4528 rp->features[idx].flags = cpu_to_le32(flags);
4529 idx++;
4530 }
4531
4532 rp->feature_count = cpu_to_le16(idx);
4533
4534 /* After reading the experimental features information, enable
4535 	 * the events to update the client on any future change.
4536 */
4537 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4538
4539 status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4540 MGMT_OP_READ_EXP_FEATURES_INFO,
4541 0, rp, sizeof(*rp) + (20 * idx));
4542
4543 kfree(rp);
4544 return status;
4545 }
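/* Each reply entry is a 16-byte UUID followed by a 32-bit little-endian
 * flags word, hence the "20 * idx" sizing above; BIT(0) set in flags
 * means the corresponding feature is currently enabled.
 */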
4546
4547 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4548 bool enabled, struct sock *skip)
4549 {
4550 struct mgmt_ev_exp_feature_changed ev;
4551
4552 memset(&ev, 0, sizeof(ev));
4553 memcpy(ev.uuid, uuid, 16);
4554 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4555
4556 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4557 &ev, sizeof(ev),
4558 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4559 }
4560
4561 #define EXP_FEAT(_uuid, _set_func) \
4562 { \
4563 .uuid = _uuid, \
4564 .set_func = _set_func, \
4565 }
4566
4567 /* The zero key uuid is special. Multiple exp features are set through it. */
4568 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4569 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4570 {
4571 struct mgmt_rp_set_exp_feature rp;
4572
4573 memset(rp.uuid, 0, 16);
4574 rp.flags = cpu_to_le32(0);
4575
4576 #ifdef CONFIG_BT_FEATURE_DEBUG
4577 if (!hdev) {
4578 bool changed = bt_dbg_get();
4579
4580 bt_dbg_set(false);
4581
4582 if (changed)
4583 exp_feature_changed(NULL, ZERO_KEY, false, sk);
4584 }
4585 #endif
4586
4587 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4588
4589 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4590 MGMT_OP_SET_EXP_FEATURE, 0,
4591 &rp, sizeof(rp));
4592 }
4593
4594 #ifdef CONFIG_BT_FEATURE_DEBUG
4595 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4596 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4597 {
4598 struct mgmt_rp_set_exp_feature rp;
4599
4600 bool val, changed;
4601 int err;
4602
4603 	/* Command requires the non-controller index */
4604 if (hdev)
4605 return mgmt_cmd_status(sk, hdev->id,
4606 MGMT_OP_SET_EXP_FEATURE,
4607 MGMT_STATUS_INVALID_INDEX);
4608
4609 /* Parameters are limited to a single octet */
4610 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4611 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4612 MGMT_OP_SET_EXP_FEATURE,
4613 MGMT_STATUS_INVALID_PARAMS);
4614
4615 /* Only boolean on/off is supported */
4616 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4617 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4618 MGMT_OP_SET_EXP_FEATURE,
4619 MGMT_STATUS_INVALID_PARAMS);
4620
4621 val = !!cp->param[0];
4622 changed = val ? !bt_dbg_get() : bt_dbg_get();
4623 bt_dbg_set(val);
4624
4625 memcpy(rp.uuid, debug_uuid, 16);
4626 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4627
4628 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4629
4630 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4631 MGMT_OP_SET_EXP_FEATURE, 0,
4632 &rp, sizeof(rp));
4633
4634 if (changed)
4635 exp_feature_changed(hdev, debug_uuid, val, sk);
4636
4637 return err;
4638 }
4639 #endif
4640
4641 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4642 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4643 {
4644 struct mgmt_rp_set_exp_feature rp;
4645 bool val, changed;
4646 int err;
4647
4648 	/* Command requires the controller index */
4649 if (!hdev)
4650 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4651 MGMT_OP_SET_EXP_FEATURE,
4652 MGMT_STATUS_INVALID_INDEX);
4653
4654 /* Parameters are limited to a single octet */
4655 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4656 return mgmt_cmd_status(sk, hdev->id,
4657 MGMT_OP_SET_EXP_FEATURE,
4658 MGMT_STATUS_INVALID_PARAMS);
4659
4660 /* Only boolean on/off is supported */
4661 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4662 return mgmt_cmd_status(sk, hdev->id,
4663 MGMT_OP_SET_EXP_FEATURE,
4664 MGMT_STATUS_INVALID_PARAMS);
4665
4666 val = !!cp->param[0];
4667
4668 if (val) {
4669 changed = !hci_dev_test_and_set_flag(hdev,
4670 HCI_MESH_EXPERIMENTAL);
4671 } else {
4672 hci_dev_clear_flag(hdev, HCI_MESH);
4673 changed = hci_dev_test_and_clear_flag(hdev,
4674 HCI_MESH_EXPERIMENTAL);
4675 }
4676
4677 memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4678 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4679
4680 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4681
4682 err = mgmt_cmd_complete(sk, hdev->id,
4683 MGMT_OP_SET_EXP_FEATURE, 0,
4684 &rp, sizeof(rp));
4685
4686 if (changed)
4687 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4688
4689 return err;
4690 }
4691
4692 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4693 struct mgmt_cp_set_exp_feature *cp,
4694 u16 data_len)
4695 {
4696 struct mgmt_rp_set_exp_feature rp;
4697 bool val, changed;
4698 int err;
4699
4700 	/* Command requires a valid controller index */
4701 if (!hdev)
4702 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4703 MGMT_OP_SET_EXP_FEATURE,
4704 MGMT_STATUS_INVALID_INDEX);
4705
4706 /* Parameters are limited to a single octet */
4707 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4708 return mgmt_cmd_status(sk, hdev->id,
4709 MGMT_OP_SET_EXP_FEATURE,
4710 MGMT_STATUS_INVALID_PARAMS);
4711
4712 /* Only boolean on/off is supported */
4713 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4714 return mgmt_cmd_status(sk, hdev->id,
4715 MGMT_OP_SET_EXP_FEATURE,
4716 MGMT_STATUS_INVALID_PARAMS);
4717
4718 hci_req_sync_lock(hdev);
4719
4720 val = !!cp->param[0];
4721 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4722
4723 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4724 err = mgmt_cmd_status(sk, hdev->id,
4725 MGMT_OP_SET_EXP_FEATURE,
4726 MGMT_STATUS_NOT_SUPPORTED);
4727 goto unlock_quality_report;
4728 }
4729
4730 if (changed) {
4731 if (hdev->set_quality_report)
4732 err = hdev->set_quality_report(hdev, val);
4733 else
4734 err = aosp_set_quality_report(hdev, val);
4735
4736 if (err) {
4737 err = mgmt_cmd_status(sk, hdev->id,
4738 MGMT_OP_SET_EXP_FEATURE,
4739 MGMT_STATUS_FAILED);
4740 goto unlock_quality_report;
4741 }
4742
4743 if (val)
4744 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4745 else
4746 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4747 }
4748
4749 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4750
4751 memcpy(rp.uuid, quality_report_uuid, 16);
4752 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4753 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4754
4755 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4756 &rp, sizeof(rp));
4757
4758 if (changed)
4759 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4760
4761 unlock_quality_report:
4762 hci_req_sync_unlock(hdev);
4763 return err;
4764 }
4765
4766 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4767 struct mgmt_cp_set_exp_feature *cp,
4768 u16 data_len)
4769 {
4770 bool val, changed;
4771 int err;
4772 struct mgmt_rp_set_exp_feature rp;
4773
4774 	/* Command requires a valid controller index */
4775 if (!hdev)
4776 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4777 MGMT_OP_SET_EXP_FEATURE,
4778 MGMT_STATUS_INVALID_INDEX);
4779
4780 /* Parameters are limited to a single octet */
4781 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4782 return mgmt_cmd_status(sk, hdev->id,
4783 MGMT_OP_SET_EXP_FEATURE,
4784 MGMT_STATUS_INVALID_PARAMS);
4785
4786 /* Only boolean on/off is supported */
4787 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4788 return mgmt_cmd_status(sk, hdev->id,
4789 MGMT_OP_SET_EXP_FEATURE,
4790 MGMT_STATUS_INVALID_PARAMS);
4791
4792 val = !!cp->param[0];
4793 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4794
4795 if (!hdev->get_data_path_id) {
4796 return mgmt_cmd_status(sk, hdev->id,
4797 MGMT_OP_SET_EXP_FEATURE,
4798 MGMT_STATUS_NOT_SUPPORTED);
4799 }
4800
4801 if (changed) {
4802 if (val)
4803 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4804 else
4805 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4806 }
4807
4808 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4809 val, changed);
4810
4811 memcpy(rp.uuid, offload_codecs_uuid, 16);
4812 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4813 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4814 err = mgmt_cmd_complete(sk, hdev->id,
4815 MGMT_OP_SET_EXP_FEATURE, 0,
4816 &rp, sizeof(rp));
4817
4818 if (changed)
4819 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4820
4821 return err;
4822 }
4823
4824 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4825 struct mgmt_cp_set_exp_feature *cp,
4826 u16 data_len)
4827 {
4828 bool val, changed;
4829 int err;
4830 struct mgmt_rp_set_exp_feature rp;
4831
4832 	/* Command requires a valid controller index */
4833 if (!hdev)
4834 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4835 MGMT_OP_SET_EXP_FEATURE,
4836 MGMT_STATUS_INVALID_INDEX);
4837
4838 /* Parameters are limited to a single octet */
4839 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4840 return mgmt_cmd_status(sk, hdev->id,
4841 MGMT_OP_SET_EXP_FEATURE,
4842 MGMT_STATUS_INVALID_PARAMS);
4843
4844 /* Only boolean on/off is supported */
4845 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4846 return mgmt_cmd_status(sk, hdev->id,
4847 MGMT_OP_SET_EXP_FEATURE,
4848 MGMT_STATUS_INVALID_PARAMS);
4849
4850 val = !!cp->param[0];
4851 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4852
4853 if (!hci_dev_le_state_simultaneous(hdev)) {
4854 return mgmt_cmd_status(sk, hdev->id,
4855 MGMT_OP_SET_EXP_FEATURE,
4856 MGMT_STATUS_NOT_SUPPORTED);
4857 }
4858
4859 if (changed) {
4860 if (val)
4861 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4862 else
4863 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4864 }
4865
4866 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4867 val, changed);
4868
4869 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4870 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4871 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4872 err = mgmt_cmd_complete(sk, hdev->id,
4873 MGMT_OP_SET_EXP_FEATURE, 0,
4874 &rp, sizeof(rp));
4875
4876 if (changed)
4877 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4878
4879 return err;
4880 }
4881
4882 #ifdef CONFIG_BT_LE
4883 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4884 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4885 {
4886 struct mgmt_rp_set_exp_feature rp;
4887 bool val, changed = false;
4888 int err;
4889
4890 	/* Command requires the non-controller index */
4891 if (hdev)
4892 return mgmt_cmd_status(sk, hdev->id,
4893 MGMT_OP_SET_EXP_FEATURE,
4894 MGMT_STATUS_INVALID_INDEX);
4895
4896 /* Parameters are limited to a single octet */
4897 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4898 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4899 MGMT_OP_SET_EXP_FEATURE,
4900 MGMT_STATUS_INVALID_PARAMS);
4901
4902 /* Only boolean on/off is supported */
4903 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4904 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4905 MGMT_OP_SET_EXP_FEATURE,
4906 MGMT_STATUS_INVALID_PARAMS);
4907
4908 	val = !!cp->param[0];
4909 if (val)
4910 err = iso_init();
4911 else
4912 err = iso_exit();
4913
4914 if (!err)
4915 changed = true;
4916
4917 memcpy(rp.uuid, iso_socket_uuid, 16);
4918 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4919
4920 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4921
4922 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4923 MGMT_OP_SET_EXP_FEATURE, 0,
4924 &rp, sizeof(rp));
4925
4926 if (changed)
4927 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4928
4929 return err;
4930 }
4931 #endif
4932
4933 static const struct mgmt_exp_feature {
4934 const u8 *uuid;
4935 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4936 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4937 } exp_features[] = {
4938 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4939 #ifdef CONFIG_BT_FEATURE_DEBUG
4940 EXP_FEAT(debug_uuid, set_debug_func),
4941 #endif
4942 EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
4943 EXP_FEAT(quality_report_uuid, set_quality_report_func),
4944 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
4945 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
4946 #ifdef CONFIG_BT_LE
4947 EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
4948 #endif
4949
4950 /* end with a null feature */
4951 EXP_FEAT(NULL, NULL)
4952 };
4953
4954 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4955 void *data, u16 data_len)
4956 {
4957 struct mgmt_cp_set_exp_feature *cp = data;
4958 size_t i = 0;
4959
4960 bt_dev_dbg(hdev, "sock %p", sk);
4961
4962 for (i = 0; exp_features[i].uuid; i++) {
4963 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4964 return exp_features[i].set_func(sk, hdev, cp, data_len);
4965 }
4966
4967 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4968 MGMT_OP_SET_EXP_FEATURE,
4969 MGMT_STATUS_NOT_SUPPORTED);
4970 }
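/* Example flow: a request whose UUID matches quality_report_uuid is
 * dispatched to set_quality_report_func(), while a UUID missing from
 * the NULL-terminated exp_features[] table fails with NOT_SUPPORTED.
 */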
4971
4972 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4973 u16 data_len)
4974 {
4975 struct mgmt_cp_get_device_flags *cp = data;
4976 struct mgmt_rp_get_device_flags rp;
4977 struct bdaddr_list_with_flags *br_params;
4978 struct hci_conn_params *params;
4979 u32 supported_flags;
4980 u32 current_flags = 0;
4981 u8 status = MGMT_STATUS_INVALID_PARAMS;
4982
4983 	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
4984 &cp->addr.bdaddr, cp->addr.type);
4985
4986 hci_dev_lock(hdev);
4987
4988 supported_flags = hdev->conn_flags;
4989
4990 memset(&rp, 0, sizeof(rp));
4991
4992 if (cp->addr.type == BDADDR_BREDR) {
4993 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4994 &cp->addr.bdaddr,
4995 cp->addr.type);
4996 if (!br_params)
4997 goto done;
4998
4999 current_flags = br_params->flags;
5000 } else {
5001 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5002 le_addr_type(cp->addr.type));
5003 if (!params)
5004 goto done;
5005
5006 current_flags = params->flags;
5007 }
5008
5009 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5010 rp.addr.type = cp->addr.type;
5011 rp.supported_flags = cpu_to_le32(supported_flags);
5012 rp.current_flags = cpu_to_le32(current_flags);
5013
5014 status = MGMT_STATUS_SUCCESS;
5015
5016 done:
5017 hci_dev_unlock(hdev);
5018
5019 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5020 &rp, sizeof(rp));
5021 }
5022
5023 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5024 bdaddr_t *bdaddr, u8 bdaddr_type,
5025 u32 supported_flags, u32 current_flags)
5026 {
5027 struct mgmt_ev_device_flags_changed ev;
5028
5029 bacpy(&ev.addr.bdaddr, bdaddr);
5030 ev.addr.type = bdaddr_type;
5031 ev.supported_flags = cpu_to_le32(supported_flags);
5032 ev.current_flags = cpu_to_le32(current_flags);
5033
5034 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5035 }
5036
5037 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5038 u16 len)
5039 {
5040 struct mgmt_cp_set_device_flags *cp = data;
5041 struct bdaddr_list_with_flags *br_params;
5042 struct hci_conn_params *params;
5043 u8 status = MGMT_STATUS_INVALID_PARAMS;
5044 u32 supported_flags;
5045 u32 current_flags = __le32_to_cpu(cp->current_flags);
5046
5047 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5048 &cp->addr.bdaddr, cp->addr.type, current_flags);
5049
5050 	/* conn_flags can change, so supported flags are re-read under hci_dev_lock() below */
5051 supported_flags = hdev->conn_flags;
5052
5053 if ((supported_flags | current_flags) != supported_flags) {
5054 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5055 current_flags, supported_flags);
5056 goto done;
5057 }
5058
5059 hci_dev_lock(hdev);
5060
5061 if (cp->addr.type == BDADDR_BREDR) {
5062 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5063 &cp->addr.bdaddr,
5064 cp->addr.type);
5065
5066 if (br_params) {
5067 br_params->flags = current_flags;
5068 status = MGMT_STATUS_SUCCESS;
5069 } else {
5070 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5071 &cp->addr.bdaddr, cp->addr.type);
5072 }
5073
5074 goto unlock;
5075 }
5076
5077 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5078 le_addr_type(cp->addr.type));
5079 if (!params) {
5080 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5081 &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5082 goto unlock;
5083 }
5084
5085 supported_flags = hdev->conn_flags;
5086
5087 if ((supported_flags | current_flags) != supported_flags) {
5088 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5089 current_flags, supported_flags);
5090 goto unlock;
5091 }
5092
5093 WRITE_ONCE(params->flags, current_flags);
5094 status = MGMT_STATUS_SUCCESS;
5095
5096 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5097 * has been set.
5098 */
5099 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5100 hci_update_passive_scan(hdev);
5101
5102 unlock:
5103 hci_dev_unlock(hdev);
5104
5105 done:
5106 if (status == MGMT_STATUS_SUCCESS)
5107 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5108 supported_flags, current_flags);
5109
5110 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5111 &cp->addr, sizeof(cp->addr));
5112 }
5113
5114 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5115 u16 handle)
5116 {
5117 struct mgmt_ev_adv_monitor_added ev;
5118
5119 ev.monitor_handle = cpu_to_le16(handle);
5120
5121 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5122 }
5123
5124 static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
5125 __le16 handle)
5126 {
5127 struct mgmt_ev_adv_monitor_removed ev;
5128
5129 ev.monitor_handle = handle;
5130
5131 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
5132 }
5133
5134 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5135 void *data, u16 len)
5136 {
5137 struct adv_monitor *monitor = NULL;
5138 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5139 int handle, err;
5140 size_t rp_size = 0;
5141 __u32 supported = 0;
5142 __u32 enabled = 0;
5143 __u16 num_handles = 0;
5144 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5145
5146 BT_DBG("request for %s", hdev->name);
5147
5148 hci_dev_lock(hdev);
5149
5150 if (msft_monitor_supported(hdev))
5151 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5152
5153 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5154 handles[num_handles++] = monitor->handle;
5155
5156 hci_dev_unlock(hdev);
5157
5158 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5159 rp = kmalloc(rp_size, GFP_KERNEL);
5160 if (!rp)
5161 return -ENOMEM;
5162
5163 /* All supported features are currently enabled */
5164 enabled = supported;
5165
5166 rp->supported_features = cpu_to_le32(supported);
5167 rp->enabled_features = cpu_to_le32(enabled);
5168 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5169 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5170 rp->num_handles = cpu_to_le16(num_handles);
5171 if (num_handles)
5172 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5173
5174 err = mgmt_cmd_complete(sk, hdev->id,
5175 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5176 MGMT_STATUS_SUCCESS, rp, rp_size);
5177
5178 kfree(rp);
5179
5180 return err;
5181 }
5182
5183 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5184 void *data, int status)
5185 {
5186 struct mgmt_rp_add_adv_patterns_monitor rp;
5187 struct mgmt_pending_cmd *cmd = data;
5188 struct adv_monitor *monitor = cmd->user_data;
5189
5190 hci_dev_lock(hdev);
5191
5192 rp.monitor_handle = cpu_to_le16(monitor->handle);
5193
5194 if (!status) {
5195 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5196 hdev->adv_monitors_cnt++;
5197 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5198 monitor->state = ADV_MONITOR_STATE_REGISTERED;
5199 hci_update_passive_scan(hdev);
5200 }
5201
5202 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
5203 mgmt_status(status), &rp, sizeof(rp));
5204 mgmt_pending_remove(cmd);
5205
5206 hci_dev_unlock(hdev);
5207 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5208 rp.monitor_handle, status);
5209 }
5210
5211 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5212 {
5213 struct mgmt_pending_cmd *cmd = data;
5214 struct adv_monitor *monitor = cmd->user_data;
5215
5216 return hci_add_adv_monitor(hdev, monitor);
5217 }
5218
5219 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5220 struct adv_monitor *m, u8 status,
5221 void *data, u16 len, u16 op)
5222 {
5223 struct mgmt_pending_cmd *cmd;
5224 int err;
5225
5226 hci_dev_lock(hdev);
5227
5228 if (status)
5229 goto unlock;
5230
5231 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5232 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5233 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5234 status = MGMT_STATUS_BUSY;
5235 goto unlock;
5236 }
5237
5238 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5239 if (!cmd) {
5240 status = MGMT_STATUS_NO_RESOURCES;
5241 goto unlock;
5242 }
5243
5244 cmd->user_data = m;
5245 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5246 mgmt_add_adv_patterns_monitor_complete);
5247 if (err) {
5248 if (err == -ENOMEM)
5249 status = MGMT_STATUS_NO_RESOURCES;
5250 else
5251 status = MGMT_STATUS_FAILED;
5252
5253 goto unlock;
5254 }
5255
5256 hci_dev_unlock(hdev);
5257
5258 return 0;
5259
5260 unlock:
5261 hci_free_adv_monitor(hdev, m);
5262 hci_dev_unlock(hdev);
5263 return mgmt_cmd_status(sk, hdev->id, op, status);
5264 }
5265
5266 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5267 struct mgmt_adv_rssi_thresholds *rssi)
5268 {
5269 if (rssi) {
5270 m->rssi.low_threshold = rssi->low_threshold;
5271 m->rssi.low_threshold_timeout =
5272 __le16_to_cpu(rssi->low_threshold_timeout);
5273 m->rssi.high_threshold = rssi->high_threshold;
5274 m->rssi.high_threshold_timeout =
5275 __le16_to_cpu(rssi->high_threshold_timeout);
5276 m->rssi.sampling_period = rssi->sampling_period;
5277 } else {
5278 /* Default values. These numbers are the least constricting
5279 * parameters for MSFT API to work, so it behaves as if there
5280 		 * are no RSSI parameters to consider. May need to be changed
5281 		 * if other APIs are to be supported.
5282 */
5283 m->rssi.low_threshold = -127;
5284 m->rssi.low_threshold_timeout = 60;
5285 m->rssi.high_threshold = -127;
5286 m->rssi.high_threshold_timeout = 0;
5287 m->rssi.sampling_period = 0;
5288 }
5289 }
5290
5291 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5292 struct mgmt_adv_pattern *patterns)
5293 {
5294 u8 offset = 0, length = 0;
5295 struct adv_pattern *p = NULL;
5296 int i;
5297
5298 for (i = 0; i < pattern_count; i++) {
5299 offset = patterns[i].offset;
5300 length = patterns[i].length;
5301 if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5302 length > HCI_MAX_EXT_AD_LENGTH ||
5303 (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5304 return MGMT_STATUS_INVALID_PARAMS;
5305
5306 p = kmalloc(sizeof(*p), GFP_KERNEL);
5307 if (!p)
5308 return MGMT_STATUS_NO_RESOURCES;
5309
5310 p->ad_type = patterns[i].ad_type;
5311 p->offset = patterns[i].offset;
5312 p->length = patterns[i].length;
5313 memcpy(p->value, patterns[i].value, p->length);
5314
5315 INIT_LIST_HEAD(&p->list);
5316 list_add(&p->list, &m->patterns);
5317 }
5318
5319 return MGMT_STATUS_SUCCESS;
5320 }
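/* Example (values assumed): a pattern with ad_type 0x09 (Complete
 * Local Name), offset 0, length 4 and value "Tile" matches any
 * advertisement whose local-name AD data starts with those four bytes;
 * offset + length is bounded by HCI_MAX_EXT_AD_LENGTH above.
 */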
5321
5322 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5323 void *data, u16 len)
5324 {
5325 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5326 struct adv_monitor *m = NULL;
5327 u8 status = MGMT_STATUS_SUCCESS;
5328 size_t expected_size = sizeof(*cp);
5329
5330 BT_DBG("request for %s", hdev->name);
5331
5332 if (len <= sizeof(*cp)) {
5333 status = MGMT_STATUS_INVALID_PARAMS;
5334 goto done;
5335 }
5336
5337 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5338 if (len != expected_size) {
5339 status = MGMT_STATUS_INVALID_PARAMS;
5340 goto done;
5341 }
5342
5343 m = kzalloc(sizeof(*m), GFP_KERNEL);
5344 if (!m) {
5345 status = MGMT_STATUS_NO_RESOURCES;
5346 goto done;
5347 }
5348
5349 INIT_LIST_HEAD(&m->patterns);
5350
5351 parse_adv_monitor_rssi(m, NULL);
5352 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5353
5354 done:
5355 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5356 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5357 }
5358
5359 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5360 void *data, u16 len)
5361 {
5362 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5363 struct adv_monitor *m = NULL;
5364 u8 status = MGMT_STATUS_SUCCESS;
5365 size_t expected_size = sizeof(*cp);
5366
5367 BT_DBG("request for %s", hdev->name);
5368
5369 if (len <= sizeof(*cp)) {
5370 status = MGMT_STATUS_INVALID_PARAMS;
5371 goto done;
5372 }
5373
5374 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5375 if (len != expected_size) {
5376 status = MGMT_STATUS_INVALID_PARAMS;
5377 goto done;
5378 }
5379
5380 m = kzalloc(sizeof(*m), GFP_KERNEL);
5381 if (!m) {
5382 status = MGMT_STATUS_NO_RESOURCES;
5383 goto done;
5384 }
5385
5386 INIT_LIST_HEAD(&m->patterns);
5387
5388 parse_adv_monitor_rssi(m, &cp->rssi);
5389 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5390
5391 done:
5392 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5393 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5394 }
5395
5396 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5397 void *data, int status)
5398 {
5399 struct mgmt_rp_remove_adv_monitor rp;
5400 struct mgmt_pending_cmd *cmd = data;
5401 struct mgmt_cp_remove_adv_monitor *cp;
5402
5403 if (status == -ECANCELED)
5404 return;
5405
5406 hci_dev_lock(hdev);
5407
5408 cp = cmd->param;
5409
5410 rp.monitor_handle = cp->monitor_handle;
5411
5412 if (!status) {
5413 mgmt_adv_monitor_removed(cmd->sk, hdev, cp->monitor_handle);
5414 hci_update_passive_scan(hdev);
5415 }
5416
5417 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
5418 mgmt_status(status), &rp, sizeof(rp));
5419 mgmt_pending_free(cmd);
5420
5421 hci_dev_unlock(hdev);
5422 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5423 rp.monitor_handle, status);
5424 }
5425
5426 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5427 {
5428 struct mgmt_pending_cmd *cmd = data;
5429 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5430 u16 handle = __le16_to_cpu(cp->monitor_handle);
5431
5432 if (!handle)
5433 return hci_remove_all_adv_monitor(hdev);
5434
5435 return hci_remove_single_adv_monitor(hdev, handle);
5436 }
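/* A monitor_handle of 0 acts as a wildcard: it removes every registered
 * monitor, while any non-zero handle removes only that monitor.
 */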
5437
5438 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5439 void *data, u16 len)
5440 {
5441 struct mgmt_pending_cmd *cmd;
5442 int err, status;
5443
5444 hci_dev_lock(hdev);
5445
5446 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5447 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5448 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5449 status = MGMT_STATUS_BUSY;
5450 goto unlock;
5451 }
5452
5453 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5454 if (!cmd) {
5455 status = MGMT_STATUS_NO_RESOURCES;
5456 goto unlock;
5457 }
5458
5459 err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
5460 mgmt_remove_adv_monitor_complete);
5461
5462 if (err) {
5463 mgmt_pending_free(cmd);
5464
5465 if (err == -ENOMEM)
5466 status = MGMT_STATUS_NO_RESOURCES;
5467 else
5468 status = MGMT_STATUS_FAILED;
5469
5470 goto unlock;
5471 }
5472
5473 hci_dev_unlock(hdev);
5474
5475 return 0;
5476
5477 unlock:
5478 hci_dev_unlock(hdev);
5479 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5480 status);
5481 }
5482
5483 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5484 {
5485 struct mgmt_rp_read_local_oob_data mgmt_rp;
5486 size_t rp_size = sizeof(mgmt_rp);
5487 struct mgmt_pending_cmd *cmd = data;
5488 struct sk_buff *skb = cmd->skb;
5489 u8 status = mgmt_status(err);
5490
5491 if (!status) {
5492 if (!skb)
5493 status = MGMT_STATUS_FAILED;
5494 else if (IS_ERR(skb))
5495 status = mgmt_status(PTR_ERR(skb));
5496 else
5497 status = mgmt_status(skb->data[0]);
5498 }
5499
5500 bt_dev_dbg(hdev, "status %d", status);
5501
5502 if (status) {
5503 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5504 goto remove;
5505 }
5506
5507 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5508
5509 if (!bredr_sc_enabled(hdev)) {
5510 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5511
5512 if (skb->len < sizeof(*rp)) {
5513 mgmt_cmd_status(cmd->sk, hdev->id,
5514 MGMT_OP_READ_LOCAL_OOB_DATA,
5515 MGMT_STATUS_FAILED);
5516 goto remove;
5517 }
5518
5519 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5520 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5521
5522 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5523 } else {
5524 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5525
5526 if (skb->len < sizeof(*rp)) {
5527 mgmt_cmd_status(cmd->sk, hdev->id,
5528 MGMT_OP_READ_LOCAL_OOB_DATA,
5529 MGMT_STATUS_FAILED);
5530 goto remove;
5531 }
5532
5533 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5534 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5535
5536 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5537 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5538 }
5539
5540 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5541 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5542
5543 remove:
5544 if (skb && !IS_ERR(skb))
5545 kfree_skb(skb);
5546
5547 mgmt_pending_free(cmd);
5548 }
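/* When Secure Connections is disabled only the P-192 hash/randomizer
 * are meaningful, so the reply above is shortened by the size of the
 * P-256 fields rather than padded with zeroed values.
 */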
5549
5550 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5551 {
5552 struct mgmt_pending_cmd *cmd = data;
5553
5554 if (bredr_sc_enabled(hdev))
5555 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5556 else
5557 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5558
5559 if (IS_ERR(cmd->skb))
5560 return PTR_ERR(cmd->skb);
5561 else
5562 return 0;
5563 }
5564
5565 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5566 void *data, u16 data_len)
5567 {
5568 struct mgmt_pending_cmd *cmd;
5569 int err;
5570
5571 bt_dev_dbg(hdev, "sock %p", sk);
5572
5573 hci_dev_lock(hdev);
5574
5575 if (!hdev_is_powered(hdev)) {
5576 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5577 MGMT_STATUS_NOT_POWERED);
5578 goto unlock;
5579 }
5580
5581 if (!lmp_ssp_capable(hdev)) {
5582 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5583 MGMT_STATUS_NOT_SUPPORTED);
5584 goto unlock;
5585 }
5586
5587 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5588 if (!cmd)
5589 err = -ENOMEM;
5590 else
5591 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5592 read_local_oob_data_complete);
5593
5594 if (err < 0) {
5595 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5596 MGMT_STATUS_FAILED);
5597
5598 if (cmd)
5599 mgmt_pending_free(cmd);
5600 }
5601
5602 unlock:
5603 hci_dev_unlock(hdev);
5604 return err;
5605 }
5606
5607 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5608 void *data, u16 len)
5609 {
5610 struct mgmt_addr_info *addr = data;
5611 int err;
5612
5613 bt_dev_dbg(hdev, "sock %p", sk);
5614
5615 if (!bdaddr_type_is_valid(addr->type))
5616 return mgmt_cmd_complete(sk, hdev->id,
5617 MGMT_OP_ADD_REMOTE_OOB_DATA,
5618 MGMT_STATUS_INVALID_PARAMS,
5619 addr, sizeof(*addr));
5620
5621 hci_dev_lock(hdev);
5622
5623 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5624 struct mgmt_cp_add_remote_oob_data *cp = data;
5625 u8 status;
5626
5627 if (cp->addr.type != BDADDR_BREDR) {
5628 err = mgmt_cmd_complete(sk, hdev->id,
5629 MGMT_OP_ADD_REMOTE_OOB_DATA,
5630 MGMT_STATUS_INVALID_PARAMS,
5631 &cp->addr, sizeof(cp->addr));
5632 goto unlock;
5633 }
5634
5635 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5636 cp->addr.type, cp->hash,
5637 cp->rand, NULL, NULL);
5638 if (err < 0)
5639 status = MGMT_STATUS_FAILED;
5640 else
5641 status = MGMT_STATUS_SUCCESS;
5642
5643 err = mgmt_cmd_complete(sk, hdev->id,
5644 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5645 &cp->addr, sizeof(cp->addr));
5646 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5647 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5648 u8 *rand192, *hash192, *rand256, *hash256;
5649 u8 status;
5650
5651 if (bdaddr_type_is_le(cp->addr.type)) {
5652 /* Enforce zero-valued 192-bit parameters as
5653 * long as legacy SMP OOB isn't implemented.
5654 */
5655 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5656 memcmp(cp->hash192, ZERO_KEY, 16)) {
5657 err = mgmt_cmd_complete(sk, hdev->id,
5658 MGMT_OP_ADD_REMOTE_OOB_DATA,
5659 MGMT_STATUS_INVALID_PARAMS,
5660 addr, sizeof(*addr));
5661 goto unlock;
5662 }
5663
5664 rand192 = NULL;
5665 hash192 = NULL;
5666 } else {
5667 /* In case one of the P-192 values is set to zero,
5668 * then just disable OOB data for P-192.
5669 */
5670 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5671 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5672 rand192 = NULL;
5673 hash192 = NULL;
5674 } else {
5675 rand192 = cp->rand192;
5676 hash192 = cp->hash192;
5677 }
5678 }
5679
5680 /* In case one of the P-256 values is set to zero, then just
5681 * disable OOB data for P-256.
5682 */
5683 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5684 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5685 rand256 = NULL;
5686 hash256 = NULL;
5687 } else {
5688 rand256 = cp->rand256;
5689 hash256 = cp->hash256;
5690 }
5691
5692 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5693 cp->addr.type, hash192, rand192,
5694 hash256, rand256);
5695 if (err < 0)
5696 status = MGMT_STATUS_FAILED;
5697 else
5698 status = MGMT_STATUS_SUCCESS;
5699
5700 err = mgmt_cmd_complete(sk, hdev->id,
5701 MGMT_OP_ADD_REMOTE_OOB_DATA,
5702 status, &cp->addr, sizeof(cp->addr));
5703 } else {
5704 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5705 len);
5706 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5707 MGMT_STATUS_INVALID_PARAMS);
5708 }
5709
5710 unlock:
5711 hci_dev_unlock(hdev);
5712 return err;
5713 }
5714
5715 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5716 void *data, u16 len)
5717 {
5718 struct mgmt_cp_remove_remote_oob_data *cp = data;
5719 u8 status;
5720 int err;
5721
5722 bt_dev_dbg(hdev, "sock %p", sk);
5723
5724 if (cp->addr.type != BDADDR_BREDR)
5725 return mgmt_cmd_complete(sk, hdev->id,
5726 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5727 MGMT_STATUS_INVALID_PARAMS,
5728 &cp->addr, sizeof(cp->addr));
5729
5730 hci_dev_lock(hdev);
5731
5732 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5733 hci_remote_oob_data_clear(hdev);
5734 status = MGMT_STATUS_SUCCESS;
5735 goto done;
5736 }
5737
5738 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5739 if (err < 0)
5740 status = MGMT_STATUS_INVALID_PARAMS;
5741 else
5742 status = MGMT_STATUS_SUCCESS;
5743
5744 done:
5745 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5746 status, &cp->addr, sizeof(cp->addr));
5747
5748 hci_dev_unlock(hdev);
5749 return err;
5750 }
5751
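/* Check a requested discovery type against the controller's capabilities.
 * On failure *mgmt_status carries the mgmt error to return to user space:
 * LE-only and interleaved discovery need LE support, while interleaved and
 * BR/EDR discovery need BR/EDR support.
 */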
5752 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5753 uint8_t *mgmt_status)
5754 {
5755 switch (type) {
5756 case DISCOV_TYPE_LE:
5757 *mgmt_status = mgmt_le_support(hdev);
5758 if (*mgmt_status)
5759 return false;
5760 break;
5761 case DISCOV_TYPE_INTERLEAVED:
5762 *mgmt_status = mgmt_le_support(hdev);
5763 if (*mgmt_status)
5764 return false;
5765 fallthrough;
5766 case DISCOV_TYPE_BREDR:
5767 *mgmt_status = mgmt_bredr_support(hdev);
5768 if (*mgmt_status)
5769 return false;
5770 break;
5771 default:
5772 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5773 return false;
5774 }
5775
5776 return true;
5777 }
5778
5779 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5780 {
5781 struct mgmt_pending_cmd *cmd = data;
5782
5783 bt_dev_dbg(hdev, "err %d", err);
5784
5785 if (err == -ECANCELED)
5786 return;
5787
5788 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5789 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5790 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5791 return;
5792
5793 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
5794 cmd->param, 1);
5795 mgmt_pending_remove(cmd);
5796
5797 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
5798 DISCOVERY_FINDING);
5799 }
5800
5801 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5802 {
5803 return hci_start_discovery_sync(hdev);
5804 }
5805
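/* Common handler for Start Discovery and Start Limited Discovery. The
 * request is rejected unless the adapter is powered, no discovery is
 * running or paused, and the requested type is supported; the actual
 * scanning is then queued via start_discovery_sync().
 */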
5806 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5807 u16 op, void *data, u16 len)
5808 {
5809 struct mgmt_cp_start_discovery *cp = data;
5810 struct mgmt_pending_cmd *cmd;
5811 u8 status;
5812 int err;
5813
5814 bt_dev_dbg(hdev, "sock %p", sk);
5815
5816 hci_dev_lock(hdev);
5817
5818 if (!hdev_is_powered(hdev)) {
5819 err = mgmt_cmd_complete(sk, hdev->id, op,
5820 MGMT_STATUS_NOT_POWERED,
5821 &cp->type, sizeof(cp->type));
5822 goto failed;
5823 }
5824
5825 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5826 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5827 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5828 &cp->type, sizeof(cp->type));
5829 goto failed;
5830 }
5831
5832 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5833 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5834 &cp->type, sizeof(cp->type));
5835 goto failed;
5836 }
5837
5838 /* Can't start discovery when it is paused */
5839 if (hdev->discovery_paused) {
5840 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5841 &cp->type, sizeof(cp->type));
5842 goto failed;
5843 }
5844
5845 /* Clear the discovery filter first to free any previously
5846 * allocated memory for the UUID list.
5847 */
5848 hci_discovery_filter_clear(hdev);
5849
5850 hdev->discovery.type = cp->type;
5851 hdev->discovery.report_invalid_rssi = false;
5852 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5853 hdev->discovery.limited = true;
5854 else
5855 hdev->discovery.limited = false;
5856
5857 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5858 if (!cmd) {
5859 err = -ENOMEM;
5860 goto failed;
5861 }
5862
5863 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5864 start_discovery_complete);
5865 if (err < 0) {
5866 mgmt_pending_remove(cmd);
5867 goto failed;
5868 }
5869
5870 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5871
5872 failed:
5873 hci_dev_unlock(hdev);
5874 return err;
5875 }
5876
5877 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5878 void *data, u16 len)
5879 {
5880 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5881 data, len);
5882 }
5883
5884 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5885 void *data, u16 len)
5886 {
5887 return start_discovery_internal(sk, hdev,
5888 MGMT_OP_START_LIMITED_DISCOVERY,
5889 data, len);
5890 }
5891
5892 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5893 void *data, u16 len)
5894 {
5895 struct mgmt_cp_start_service_discovery *cp = data;
5896 struct mgmt_pending_cmd *cmd;
5897 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5898 u16 uuid_count, expected_len;
5899 u8 status;
5900 int err;
5901
5902 bt_dev_dbg(hdev, "sock %p", sk);
5903
5904 hci_dev_lock(hdev);
5905
5906 if (!hdev_is_powered(hdev)) {
5907 err = mgmt_cmd_complete(sk, hdev->id,
5908 MGMT_OP_START_SERVICE_DISCOVERY,
5909 MGMT_STATUS_NOT_POWERED,
5910 &cp->type, sizeof(cp->type));
5911 goto failed;
5912 }
5913
5914 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5915 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5916 err = mgmt_cmd_complete(sk, hdev->id,
5917 MGMT_OP_START_SERVICE_DISCOVERY,
5918 MGMT_STATUS_BUSY, &cp->type,
5919 sizeof(cp->type));
5920 goto failed;
5921 }
5922
5923 if (hdev->discovery_paused) {
5924 err = mgmt_cmd_complete(sk, hdev->id,
5925 MGMT_OP_START_SERVICE_DISCOVERY,
5926 MGMT_STATUS_BUSY, &cp->type,
5927 sizeof(cp->type));
5928 goto failed;
5929 }
5930
5931 uuid_count = __le16_to_cpu(cp->uuid_count);
5932 if (uuid_count > max_uuid_count) {
5933 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5934 uuid_count);
5935 err = mgmt_cmd_complete(sk, hdev->id,
5936 MGMT_OP_START_SERVICE_DISCOVERY,
5937 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5938 sizeof(cp->type));
5939 goto failed;
5940 }
5941
5942 expected_len = sizeof(*cp) + uuid_count * 16;
5943 if (expected_len != len) {
5944 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5945 expected_len, len);
5946 err = mgmt_cmd_complete(sk, hdev->id,
5947 MGMT_OP_START_SERVICE_DISCOVERY,
5948 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5949 sizeof(cp->type));
5950 goto failed;
5951 }
5952
5953 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5954 err = mgmt_cmd_complete(sk, hdev->id,
5955 MGMT_OP_START_SERVICE_DISCOVERY,
5956 status, &cp->type, sizeof(cp->type));
5957 goto failed;
5958 }
5959
5960 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
5961 hdev, data, len);
5962 if (!cmd) {
5963 err = -ENOMEM;
5964 goto failed;
5965 }
5966
5967 /* Clear the discovery filter first to free any previously
5968 * allocated memory for the UUID list.
5969 */
5970 hci_discovery_filter_clear(hdev);
5971
5972 hdev->discovery.result_filtering = true;
5973 hdev->discovery.type = cp->type;
5974 hdev->discovery.rssi = cp->rssi;
5975 hdev->discovery.uuid_count = uuid_count;
5976
5977 if (uuid_count > 0) {
5978 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
5979 GFP_KERNEL);
5980 if (!hdev->discovery.uuids) {
5981 err = mgmt_cmd_complete(sk, hdev->id,
5982 MGMT_OP_START_SERVICE_DISCOVERY,
5983 MGMT_STATUS_FAILED,
5984 &cp->type, sizeof(cp->type));
5985 mgmt_pending_remove(cmd);
5986 goto failed;
5987 }
5988 }
5989
5990 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5991 start_discovery_complete);
5992 if (err < 0) {
5993 mgmt_pending_remove(cmd);
5994 goto failed;
5995 }
5996
5997 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5998
5999 failed:
6000 hci_dev_unlock(hdev);
6001 return err;
6002 }
6003
6004 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6005 {
6006 struct mgmt_pending_cmd *cmd = data;
6007
6008 if (err == -ECANCELED ||
6009 cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6010 return;
6011
6012 bt_dev_dbg(hdev, "err %d", err);
6013
6014 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
6015 cmd->param, 1);
6016 mgmt_pending_remove(cmd);
6017
6018 if (!err)
6019 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6020 }
6021
6022 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6023 {
6024 return hci_stop_discovery_sync(hdev);
6025 }
6026
6027 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6028 u16 len)
6029 {
6030 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6031 struct mgmt_pending_cmd *cmd;
6032 int err;
6033
6034 bt_dev_dbg(hdev, "sock %p", sk);
6035
6036 hci_dev_lock(hdev);
6037
6038 if (!hci_discovery_active(hdev)) {
6039 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6040 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6041 sizeof(mgmt_cp->type));
6042 goto unlock;
6043 }
6044
6045 if (hdev->discovery.type != mgmt_cp->type) {
6046 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6047 MGMT_STATUS_INVALID_PARAMS,
6048 &mgmt_cp->type, sizeof(mgmt_cp->type));
6049 goto unlock;
6050 }
6051
6052 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6053 if (!cmd) {
6054 err = -ENOMEM;
6055 goto unlock;
6056 }
6057
6058 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6059 stop_discovery_complete);
6060 if (err < 0) {
6061 mgmt_pending_remove(cmd);
6062 goto unlock;
6063 }
6064
6065 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6066
6067 unlock:
6068 hci_dev_unlock(hdev);
6069 return err;
6070 }
6071
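/* Confirm Name resolves the name_state of an inquiry cache entry: if user
 * space already knows the name, the entry is taken off the resolve list;
 * otherwise it is marked NAME_NEEDED so that remote name resolving picks
 * it up.
 */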
6072 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6073 u16 len)
6074 {
6075 struct mgmt_cp_confirm_name *cp = data;
6076 struct inquiry_entry *e;
6077 int err;
6078
6079 bt_dev_dbg(hdev, "sock %p", sk);
6080
6081 hci_dev_lock(hdev);
6082
6083 if (!hci_discovery_active(hdev)) {
6084 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6085 MGMT_STATUS_FAILED, &cp->addr,
6086 sizeof(cp->addr));
6087 goto failed;
6088 }
6089
6090 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6091 if (!e) {
6092 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6093 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6094 sizeof(cp->addr));
6095 goto failed;
6096 }
6097
6098 if (cp->name_known) {
6099 e->name_state = NAME_KNOWN;
6100 list_del(&e->list);
6101 } else {
6102 e->name_state = NAME_NEEDED;
6103 hci_inquiry_cache_update_resolve(hdev, e);
6104 }
6105
6106 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6107 &cp->addr, sizeof(cp->addr));
6108
6109 failed:
6110 hci_dev_unlock(hdev);
6111 return err;
6112 }
6113
6114 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6115 u16 len)
6116 {
6117 struct mgmt_cp_block_device *cp = data;
6118 u8 status;
6119 int err;
6120
6121 bt_dev_dbg(hdev, "sock %p", sk);
6122
6123 if (!bdaddr_type_is_valid(cp->addr.type))
6124 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6125 MGMT_STATUS_INVALID_PARAMS,
6126 &cp->addr, sizeof(cp->addr));
6127
6128 hci_dev_lock(hdev);
6129
6130 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6131 cp->addr.type);
6132 if (err < 0) {
6133 status = MGMT_STATUS_FAILED;
6134 goto done;
6135 }
6136
6137 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6138 sk);
6139 status = MGMT_STATUS_SUCCESS;
6140
6141 done:
6142 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6143 &cp->addr, sizeof(cp->addr));
6144
6145 hci_dev_unlock(hdev);
6146
6147 return err;
6148 }
6149
6150 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6151 u16 len)
6152 {
6153 struct mgmt_cp_unblock_device *cp = data;
6154 u8 status;
6155 int err;
6156
6157 bt_dev_dbg(hdev, "sock %p", sk);
6158
6159 if (!bdaddr_type_is_valid(cp->addr.type))
6160 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6161 MGMT_STATUS_INVALID_PARAMS,
6162 &cp->addr, sizeof(cp->addr));
6163
6164 hci_dev_lock(hdev);
6165
6166 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6167 cp->addr.type);
6168 if (err < 0) {
6169 status = MGMT_STATUS_INVALID_PARAMS;
6170 goto done;
6171 }
6172
6173 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6174 sk);
6175 status = MGMT_STATUS_SUCCESS;
6176
6177 done:
6178 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6179 &cp->addr, sizeof(cp->addr));
6180
6181 hci_dev_unlock(hdev);
6182
6183 return err;
6184 }
6185
6186 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6187 {
6188 return hci_update_eir_sync(hdev);
6189 }
6190
6191 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6192 u16 len)
6193 {
6194 struct mgmt_cp_set_device_id *cp = data;
6195 int err;
6196 __u16 source;
6197
6198 bt_dev_dbg(hdev, "sock %p", sk);
6199
6200 source = __le16_to_cpu(cp->source);
6201
6202 if (source > 0x0002)
6203 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6204 MGMT_STATUS_INVALID_PARAMS);
6205
6206 hci_dev_lock(hdev);
6207
6208 hdev->devid_source = source;
6209 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6210 hdev->devid_product = __le16_to_cpu(cp->product);
6211 hdev->devid_version = __le16_to_cpu(cp->version);
6212
6213 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6214 NULL, 0);
6215
6216 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6217
6218 hci_dev_unlock(hdev);
6219
6220 return err;
6221 }
6222
6223 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6224 {
6225 if (err)
6226 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6227 else
6228 bt_dev_dbg(hdev, "status %d", err);
6229 }
6230
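/* Completion for Set Advertising: mirror the HCI_LE_ADV controller state
 * into the HCI_ADVERTISING setting, notify all pending commands and, when
 * software advertising was just turned off, re-schedule any configured
 * advertising instances.
 */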
6231 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6232 {
6233 struct cmd_lookup match = { NULL, hdev };
6234 u8 instance;
6235 struct adv_info *adv_instance;
6236 u8 status = mgmt_status(err);
6237
6238 if (status) {
6239 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, true,
6240 cmd_status_rsp, &status);
6241 return;
6242 }
6243
6244 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6245 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6246 else
6247 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6248
6249 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, true, settings_rsp,
6250 &match);
6251
6252 new_settings(hdev, match.sk);
6253
6254 if (match.sk)
6255 sock_put(match.sk);
6256
6257 /* If "Set Advertising" was just disabled and instance advertising was
6258 * set up earlier, then re-enable multi-instance advertising.
6259 */
6260 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6261 list_empty(&hdev->adv_instances))
6262 return;
6263
6264 instance = hdev->cur_adv_instance;
6265 if (!instance) {
6266 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6267 struct adv_info, list);
6268 if (!adv_instance)
6269 return;
6270
6271 instance = adv_instance->instance;
6272 }
6273
6274 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6275
6276 enable_advertising_instance(hdev, err);
6277 }
6278
6279 static int set_adv_sync(struct hci_dev *hdev, void *data)
6280 {
6281 struct mgmt_pending_cmd *cmd = data;
6282 struct mgmt_mode *cp = cmd->param;
6283 u8 val = !!cp->val;
6284
6285 if (cp->val == 0x02)
6286 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6287 else
6288 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6289
6290 cancel_adv_timeout(hdev);
6291
6292 if (val) {
6293 /* Switch to instance "0" for the Set Advertising setting.
6294 * We cannot use update_[adv|scan_rsp]_data() here as the
6295 * HCI_ADVERTISING flag is not yet set.
6296 */
6297 hdev->cur_adv_instance = 0x00;
6298
6299 if (ext_adv_capable(hdev)) {
6300 hci_start_ext_adv_sync(hdev, 0x00);
6301 } else {
6302 hci_update_adv_data_sync(hdev, 0x00);
6303 hci_update_scan_rsp_data_sync(hdev, 0x00);
6304 hci_enable_advertising_sync(hdev);
6305 }
6306 } else {
6307 hci_disable_advertising_sync(hdev);
6308 }
6309
6310 return 0;
6311 }
6312
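/* Set Advertising accepts 0x00 (off), 0x01 (on) and 0x02 (on and
 * connectable). When the adapter is powered off, in mesh mode, has LE
 * connections or an active LE scan running, only the flags are toggled
 * and no HCI traffic is generated.
 */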
6313 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6314 u16 len)
6315 {
6316 struct mgmt_mode *cp = data;
6317 struct mgmt_pending_cmd *cmd;
6318 u8 val, status;
6319 int err;
6320
6321 bt_dev_dbg(hdev, "sock %p", sk);
6322
6323 status = mgmt_le_support(hdev);
6324 if (status)
6325 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6326 status);
6327
6328 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6329 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6330 MGMT_STATUS_INVALID_PARAMS);
6331
6332 if (hdev->advertising_paused)
6333 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6334 MGMT_STATUS_BUSY);
6335
6336 hci_dev_lock(hdev);
6337
6338 val = !!cp->val;
6339
6340 /* The following conditions mean that we should not do any HCI
6341 * communication but directly send a mgmt response to user space
6342 * (after toggling the flag if necessary).
6343 */
6345 if (!hdev_is_powered(hdev) ||
6346 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6347 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6348 hci_dev_test_flag(hdev, HCI_MESH) ||
6349 hci_conn_num(hdev, LE_LINK) > 0 ||
6350 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6351 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6352 bool changed;
6353
6354 if (cp->val) {
6355 hdev->cur_adv_instance = 0x00;
6356 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6357 if (cp->val == 0x02)
6358 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6359 else
6360 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6361 } else {
6362 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6363 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6364 }
6365
6366 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6367 if (err < 0)
6368 goto unlock;
6369
6370 if (changed)
6371 err = new_settings(hdev, sk);
6372
6373 goto unlock;
6374 }
6375
6376 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6377 pending_find(MGMT_OP_SET_LE, hdev)) {
6378 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6379 MGMT_STATUS_BUSY);
6380 goto unlock;
6381 }
6382
6383 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6384 if (!cmd)
6385 err = -ENOMEM;
6386 else
6387 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6388 set_advertising_complete);
6389
6390 if (err < 0 && cmd)
6391 mgmt_pending_remove(cmd);
6392
6393 unlock:
6394 hci_dev_unlock(hdev);
6395 return err;
6396 }
6397
6398 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6399 void *data, u16 len)
6400 {
6401 struct mgmt_cp_set_static_address *cp = data;
6402 int err;
6403
6404 bt_dev_dbg(hdev, "sock %p", sk);
6405
6406 if (!lmp_le_capable(hdev))
6407 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6408 MGMT_STATUS_NOT_SUPPORTED);
6409
6410 if (hdev_is_powered(hdev))
6411 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6412 MGMT_STATUS_REJECTED);
6413
6414 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6415 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6416 return mgmt_cmd_status(sk, hdev->id,
6417 MGMT_OP_SET_STATIC_ADDRESS,
6418 MGMT_STATUS_INVALID_PARAMS);
6419
6420 /* Two most significant bits shall be set */
6421 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6422 return mgmt_cmd_status(sk, hdev->id,
6423 MGMT_OP_SET_STATIC_ADDRESS,
6424 MGMT_STATUS_INVALID_PARAMS);
6425 }
6426
6427 hci_dev_lock(hdev);
6428
6429 bacpy(&hdev->static_addr, &cp->bdaddr);
6430
6431 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6432 if (err < 0)
6433 goto unlock;
6434
6435 err = new_settings(hdev, sk);
6436
6437 unlock:
6438 hci_dev_unlock(hdev);
6439 return err;
6440 }
6441
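/* Scan interval and window are in units of 0.625 ms and limited to the
 * range 0x0004 (2.5 ms) to 0x4000 (10.24 s) defined by the Core
 * Specification; the window may not exceed the interval.
 */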
6442 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6443 void *data, u16 len)
6444 {
6445 struct mgmt_cp_set_scan_params *cp = data;
6446 __u16 interval, window;
6447 int err;
6448
6449 bt_dev_dbg(hdev, "sock %p", sk);
6450
6451 if (!lmp_le_capable(hdev))
6452 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6453 MGMT_STATUS_NOT_SUPPORTED);
6454
6455 /* Keep allowed ranges in sync with set_mesh() */
6456 interval = __le16_to_cpu(cp->interval);
6457
6458 if (interval < 0x0004 || interval > 0x4000)
6459 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6460 MGMT_STATUS_INVALID_PARAMS);
6461
6462 window = __le16_to_cpu(cp->window);
6463
6464 if (window < 0x0004 || window > 0x4000)
6465 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6466 MGMT_STATUS_INVALID_PARAMS);
6467
6468 if (window > interval)
6469 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6470 MGMT_STATUS_INVALID_PARAMS);
6471
6472 hci_dev_lock(hdev);
6473
6474 hdev->le_scan_interval = interval;
6475 hdev->le_scan_window = window;
6476
6477 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6478 NULL, 0);
6479
6480 /* If background scan is running, restart it so new parameters are
6481 * loaded.
6482 */
6483 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6484 hdev->discovery.state == DISCOVERY_STOPPED)
6485 hci_update_passive_scan(hdev);
6486
6487 hci_dev_unlock(hdev);
6488
6489 return err;
6490 }
6491
6492 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6493 {
6494 struct mgmt_pending_cmd *cmd = data;
6495
6496 bt_dev_dbg(hdev, "err %d", err);
6497
6498 if (err) {
6499 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6500 mgmt_status(err));
6501 } else {
6502 struct mgmt_mode *cp = cmd->param;
6503
6504 if (cp->val)
6505 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6506 else
6507 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6508
6509 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6510 new_settings(hdev, cmd->sk);
6511 }
6512
6513 mgmt_pending_free(cmd);
6514 }
6515
6516 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6517 {
6518 struct mgmt_pending_cmd *cmd = data;
6519 struct mgmt_mode *cp = cmd->param;
6520
6521 return hci_write_fast_connectable_sync(hdev, cp->val);
6522 }
6523
6524 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6525 void *data, u16 len)
6526 {
6527 struct mgmt_mode *cp = data;
6528 struct mgmt_pending_cmd *cmd;
6529 int err;
6530
6531 bt_dev_dbg(hdev, "sock %p", sk);
6532
6533 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6534 hdev->hci_ver < BLUETOOTH_VER_1_2)
6535 return mgmt_cmd_status(sk, hdev->id,
6536 MGMT_OP_SET_FAST_CONNECTABLE,
6537 MGMT_STATUS_NOT_SUPPORTED);
6538
6539 if (cp->val != 0x00 && cp->val != 0x01)
6540 return mgmt_cmd_status(sk, hdev->id,
6541 MGMT_OP_SET_FAST_CONNECTABLE,
6542 MGMT_STATUS_INVALID_PARAMS);
6543
6544 hci_dev_lock(hdev);
6545
6546 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6547 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6548 goto unlock;
6549 }
6550
6551 if (!hdev_is_powered(hdev)) {
6552 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6553 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6554 new_settings(hdev, sk);
6555 goto unlock;
6556 }
6557
6558 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6559 len);
6560 if (!cmd)
6561 err = -ENOMEM;
6562 else
6563 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6564 fast_connectable_complete);
6565
6566 if (err < 0) {
6567 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6568 MGMT_STATUS_FAILED);
6569
6570 if (cmd)
6571 mgmt_pending_free(cmd);
6572 }
6573
6574 unlock:
6575 hci_dev_unlock(hdev);
6576
6577 return err;
6578 }
6579
6580 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6581 {
6582 struct mgmt_pending_cmd *cmd = data;
6583
6584 bt_dev_dbg(hdev, "err %d", err);
6585
6586 if (err) {
6587 u8 mgmt_err = mgmt_status(err);
6588
6589 /* We need to restore the flag if related HCI commands
6590 * failed.
6591 */
6592 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6593
6594 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
6595 } else {
6596 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6597 new_settings(hdev, cmd->sk);
6598 }
6599
6600 mgmt_pending_free(cmd);
6601 }
6602
6603 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6604 {
6605 int status;
6606
6607 status = hci_write_fast_connectable_sync(hdev, false);
6608
6609 if (!status)
6610 status = hci_update_scan_sync(hdev);
6611
6612 /* Since only the advertising data flags will change, there
6613 * is no need to update the scan response data.
6614 */
6615 if (!status)
6616 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6617
6618 return status;
6619 }
6620
6621 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6622 {
6623 struct mgmt_mode *cp = data;
6624 struct mgmt_pending_cmd *cmd;
6625 int err;
6626
6627 bt_dev_dbg(hdev, "sock %p", sk);
6628
6629 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6630 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6631 MGMT_STATUS_NOT_SUPPORTED);
6632
6633 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6634 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6635 MGMT_STATUS_REJECTED);
6636
6637 if (cp->val != 0x00 && cp->val != 0x01)
6638 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6639 MGMT_STATUS_INVALID_PARAMS);
6640
6641 hci_dev_lock(hdev);
6642
6643 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6644 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6645 goto unlock;
6646 }
6647
6648 if (!hdev_is_powered(hdev)) {
6649 if (!cp->val) {
6650 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6651 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6652 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6653 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6654 }
6655
6656 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6657
6658 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6659 if (err < 0)
6660 goto unlock;
6661
6662 err = new_settings(hdev, sk);
6663 goto unlock;
6664 }
6665
6666 /* Reject disabling when powered on */
6667 if (!cp->val) {
6668 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6669 MGMT_STATUS_REJECTED);
6670 goto unlock;
6671 } else {
6672 /* When configuring a dual-mode controller to operate
6673 * with LE only and using a static address, then switching
6674 * BR/EDR back on is not allowed.
6675 *
6676 * Dual-mode controllers shall operate with the public
6677 * address as its identity address for BR/EDR and LE. So
6678 * reject the attempt to create an invalid configuration.
6679 *
6680 * The same restriction applies when Secure Connections
6681 * has been enabled. For BR/EDR this is a controller feature
6682 * while for LE it is a host stack feature. This means that
6683 * switching BR/EDR back on when Secure Connections has been
6684 * enabled is not a supported transaction.
6685 */
6686 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6687 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6688 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6689 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6690 MGMT_STATUS_REJECTED);
6691 goto unlock;
6692 }
6693 }
6694
6695 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6696 if (!cmd)
6697 err = -ENOMEM;
6698 else
6699 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6700 set_bredr_complete);
6701
6702 if (err < 0) {
6703 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6704 MGMT_STATUS_FAILED);
6705 if (cmd)
6706 mgmt_pending_free(cmd);
6707
6708 goto unlock;
6709 }
6710
6711 /* We need to flip the bit already here so that
6712 * hci_req_update_adv_data generates the correct flags.
6713 */
6714 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6715
6716 unlock:
6717 hci_dev_unlock(hdev);
6718 return err;
6719 }
6720
6721 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6722 {
6723 struct mgmt_pending_cmd *cmd = data;
6724 struct mgmt_mode *cp;
6725
6726 bt_dev_dbg(hdev, "err %d", err);
6727
6728 if (err) {
6729 u8 mgmt_err = mgmt_status(err);
6730
6731 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
6732 goto done;
6733 }
6734
6735 cp = cmd->param;
6736
6737 switch (cp->val) {
6738 case 0x00:
6739 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6740 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6741 break;
6742 case 0x01:
6743 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6744 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6745 break;
6746 case 0x02:
6747 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6748 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6749 break;
6750 }
6751
6752 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6753 new_settings(hdev, cmd->sk);
6754
6755 done:
6756 mgmt_pending_free(cmd);
6757 }
6758
6759 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6760 {
6761 struct mgmt_pending_cmd *cmd = data;
6762 struct mgmt_mode *cp = cmd->param;
6763 u8 val = !!cp->val;
6764
6765 /* Force write of val */
6766 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6767
6768 return hci_write_sc_support_sync(hdev, val);
6769 }
6770
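/* Set Secure Connections accepts 0x00 (off), 0x01 (on) and 0x02 (SC Only
 * mode). On powered, SC capable, BR/EDR enabled controllers the Secure
 * Connections host support is written synchronously; otherwise only the
 * HCI_SC_ENABLED/HCI_SC_ONLY flags are updated.
 */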
6771 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6772 void *data, u16 len)
6773 {
6774 struct mgmt_mode *cp = data;
6775 struct mgmt_pending_cmd *cmd;
6776 u8 val;
6777 int err;
6778
6779 bt_dev_dbg(hdev, "sock %p", sk);
6780
6781 if (!lmp_sc_capable(hdev) &&
6782 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6783 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6784 MGMT_STATUS_NOT_SUPPORTED);
6785
6786 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6787 lmp_sc_capable(hdev) &&
6788 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6789 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6790 MGMT_STATUS_REJECTED);
6791
6792 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6793 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6794 MGMT_STATUS_INVALID_PARAMS);
6795
6796 hci_dev_lock(hdev);
6797
6798 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6799 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6800 bool changed;
6801
6802 if (cp->val) {
6803 changed = !hci_dev_test_and_set_flag(hdev,
6804 HCI_SC_ENABLED);
6805 if (cp->val == 0x02)
6806 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6807 else
6808 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6809 } else {
6810 changed = hci_dev_test_and_clear_flag(hdev,
6811 HCI_SC_ENABLED);
6812 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6813 }
6814
6815 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6816 if (err < 0)
6817 goto failed;
6818
6819 if (changed)
6820 err = new_settings(hdev, sk);
6821
6822 goto failed;
6823 }
6824
6825 val = !!cp->val;
6826
6827 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6828 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6829 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6830 goto failed;
6831 }
6832
6833 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6834 if (!cmd)
6835 err = -ENOMEM;
6836 else
6837 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6838 set_secure_conn_complete);
6839
6840 if (err < 0) {
6841 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6842 MGMT_STATUS_FAILED);
6843 if (cmd)
6844 mgmt_pending_free(cmd);
6845 }
6846
6847 failed:
6848 hci_dev_unlock(hdev);
6849 return err;
6850 }
6851
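/* Set Debug Keys: 0x01 keeps received debug keys around instead of
 * discarding them, 0x02 additionally enables the controller's SSP debug
 * mode so that the controller itself generates debug keys.
 */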
6852 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6853 void *data, u16 len)
6854 {
6855 struct mgmt_mode *cp = data;
6856 bool changed, use_changed;
6857 int err;
6858
6859 bt_dev_dbg(hdev, "sock %p", sk);
6860
6861 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6862 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6863 MGMT_STATUS_INVALID_PARAMS);
6864
6865 hci_dev_lock(hdev);
6866
6867 if (cp->val)
6868 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6869 else
6870 changed = hci_dev_test_and_clear_flag(hdev,
6871 HCI_KEEP_DEBUG_KEYS);
6872
6873 if (cp->val == 0x02)
6874 use_changed = !hci_dev_test_and_set_flag(hdev,
6875 HCI_USE_DEBUG_KEYS);
6876 else
6877 use_changed = hci_dev_test_and_clear_flag(hdev,
6878 HCI_USE_DEBUG_KEYS);
6879
6880 if (hdev_is_powered(hdev) && use_changed &&
6881 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6882 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6883 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6884 sizeof(mode), &mode);
6885 }
6886
6887 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6888 if (err < 0)
6889 goto unlock;
6890
6891 if (changed)
6892 err = new_settings(hdev, sk);
6893
6894 unlock:
6895 hci_dev_unlock(hdev);
6896 return err;
6897 }
6898
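/* Set Privacy can only be changed while powered off. 0x01 enables privacy
 * with the supplied IRK, 0x02 additionally selects limited privacy (the
 * HCI_LIMITED_PRIVACY flag); 0x00 disables privacy and clears the stored
 * IRK.
 */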
6899 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6900 u16 len)
6901 {
6902 struct mgmt_cp_set_privacy *cp = cp_data;
6903 bool changed;
6904 int err;
6905
6906 bt_dev_dbg(hdev, "sock %p", sk);
6907
6908 if (!lmp_le_capable(hdev))
6909 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6910 MGMT_STATUS_NOT_SUPPORTED);
6911
6912 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6913 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6914 MGMT_STATUS_INVALID_PARAMS);
6915
6916 if (hdev_is_powered(hdev))
6917 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6918 MGMT_STATUS_REJECTED);
6919
6920 hci_dev_lock(hdev);
6921
6922 /* If user space supports this command it is also expected to
6923 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6924 */
6925 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6926
6927 if (cp->privacy) {
6928 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6929 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
6930 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6931 hci_adv_instances_set_rpa_expired(hdev, true);
6932 if (cp->privacy == 0x02)
6933 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6934 else
6935 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6936 } else {
6937 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6938 memset(hdev->irk, 0, sizeof(hdev->irk));
6939 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6940 hci_adv_instances_set_rpa_expired(hdev, false);
6941 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6942 }
6943
6944 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
6945 if (err < 0)
6946 goto unlock;
6947
6948 if (changed)
6949 err = new_settings(hdev, sk);
6950
6951 unlock:
6952 hci_dev_unlock(hdev);
6953 return err;
6954 }
6955
6956 static bool irk_is_valid(struct mgmt_irk_info *irk)
6957 {
6958 switch (irk->addr.type) {
6959 case BDADDR_LE_PUBLIC:
6960 return true;
6961
6962 case BDADDR_LE_RANDOM:
6963 /* Two most significant bits shall be set */
6964 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6965 return false;
6966 return true;
6967 }
6968
6969 return false;
6970 }
6971
6972 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6973 u16 len)
6974 {
6975 struct mgmt_cp_load_irks *cp = cp_data;
6976 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
6977 sizeof(struct mgmt_irk_info));
6978 u16 irk_count, expected_len;
6979 int i, err;
6980
6981 bt_dev_dbg(hdev, "sock %p", sk);
6982
6983 if (!lmp_le_capable(hdev))
6984 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6985 MGMT_STATUS_NOT_SUPPORTED);
6986
6987 irk_count = __le16_to_cpu(cp->irk_count);
6988 if (irk_count > max_irk_count) {
6989 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
6990 irk_count);
6991 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6992 MGMT_STATUS_INVALID_PARAMS);
6993 }
6994
6995 expected_len = struct_size(cp, irks, irk_count);
6996 if (expected_len != len) {
6997 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
6998 expected_len, len);
6999 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7000 MGMT_STATUS_INVALID_PARAMS);
7001 }
7002
7003 bt_dev_dbg(hdev, "irk_count %u", irk_count);
7004
7005 for (i = 0; i < irk_count; i++) {
7006 struct mgmt_irk_info *key = &cp->irks[i];
7007
7008 if (!irk_is_valid(key))
7009 return mgmt_cmd_status(sk, hdev->id,
7010 MGMT_OP_LOAD_IRKS,
7011 MGMT_STATUS_INVALID_PARAMS);
7012 }
7013
7014 hci_dev_lock(hdev);
7015
7016 hci_smp_irks_clear(hdev);
7017
7018 for (i = 0; i < irk_count; i++) {
7019 struct mgmt_irk_info *irk = &cp->irks[i];
7020
7021 if (hci_is_blocked_key(hdev,
7022 HCI_BLOCKED_KEY_TYPE_IRK,
7023 irk->val)) {
7024 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7025 &irk->addr.bdaddr);
7026 continue;
7027 }
7028
7029 hci_add_irk(hdev, &irk->addr.bdaddr,
7030 le_addr_type(irk->addr.type), irk->val,
7031 BDADDR_ANY);
7032 }
7033
7034 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7035
7036 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7037
7038 hci_dev_unlock(hdev);
7039
7040 return err;
7041 }
7042
7043 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7044 {
7045 if (key->initiator != 0x00 && key->initiator != 0x01)
7046 return false;
7047
7048 switch (key->addr.type) {
7049 case BDADDR_LE_PUBLIC:
7050 return true;
7051
7052 case BDADDR_LE_RANDOM:
7053 /* Two most significant bits shall be set */
7054 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7055 return false;
7056 return true;
7057 }
7058
7059 return false;
7060 }
7061
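/* Load Long Term Keys replaces the whole LTK store. Each entry is mapped
 * onto an SMP key type from its authentication level and initiator role;
 * blocked keys, malformed entries and P-256 debug keys are skipped rather
 * than failing the entire load.
 */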
7062 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7063 void *cp_data, u16 len)
7064 {
7065 struct mgmt_cp_load_long_term_keys *cp = cp_data;
7066 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7067 sizeof(struct mgmt_ltk_info));
7068 u16 key_count, expected_len;
7069 int i, err;
7070
7071 bt_dev_dbg(hdev, "sock %p", sk);
7072
7073 if (!lmp_le_capable(hdev))
7074 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7075 MGMT_STATUS_NOT_SUPPORTED);
7076
7077 key_count = __le16_to_cpu(cp->key_count);
7078 if (key_count > max_key_count) {
7079 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7080 key_count);
7081 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7082 MGMT_STATUS_INVALID_PARAMS);
7083 }
7084
7085 expected_len = struct_size(cp, keys, key_count);
7086 if (expected_len != len) {
7087 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7088 expected_len, len);
7089 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7090 MGMT_STATUS_INVALID_PARAMS);
7091 }
7092
7093 bt_dev_dbg(hdev, "key_count %u", key_count);
7094
7095 hci_dev_lock(hdev);
7096
7097 hci_smp_ltks_clear(hdev);
7098
7099 for (i = 0; i < key_count; i++) {
7100 struct mgmt_ltk_info *key = &cp->keys[i];
7101 u8 type, authenticated;
7102
7103 if (hci_is_blocked_key(hdev,
7104 HCI_BLOCKED_KEY_TYPE_LTK,
7105 key->val)) {
7106 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7107 &key->addr.bdaddr);
7108 continue;
7109 }
7110
7111 if (!ltk_is_valid(key)) {
7112 bt_dev_warn(hdev, "Invalid LTK for %pMR",
7113 &key->addr.bdaddr);
7114 continue;
7115 }
7116
7117 switch (key->type) {
7118 case MGMT_LTK_UNAUTHENTICATED:
7119 authenticated = 0x00;
7120 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7121 break;
7122 case MGMT_LTK_AUTHENTICATED:
7123 authenticated = 0x01;
7124 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7125 break;
7126 case MGMT_LTK_P256_UNAUTH:
7127 authenticated = 0x00;
7128 type = SMP_LTK_P256;
7129 break;
7130 case MGMT_LTK_P256_AUTH:
7131 authenticated = 0x01;
7132 type = SMP_LTK_P256;
7133 break;
7134 case MGMT_LTK_P256_DEBUG:
7135 authenticated = 0x00;
7136 type = SMP_LTK_P256_DEBUG;
7137 fallthrough;
7138 default:
7139 continue;
7140 }
7141
7142 hci_add_ltk(hdev, &key->addr.bdaddr,
7143 le_addr_type(key->addr.type), type, authenticated,
7144 key->val, key->enc_size, key->ediv, key->rand);
7145 }
7146
7147 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7148 NULL, 0);
7149
7150 hci_dev_unlock(hdev);
7151
7152 return err;
7153 }
7154
7155 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7156 {
7157 struct mgmt_pending_cmd *cmd = data;
7158 struct hci_conn *conn = cmd->user_data;
7159 struct mgmt_cp_get_conn_info *cp = cmd->param;
7160 struct mgmt_rp_get_conn_info rp;
7161 u8 status;
7162
7163 bt_dev_dbg(hdev, "err %d", err);
7164
7165 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7166
7167 status = mgmt_status(err);
7168 if (status == MGMT_STATUS_SUCCESS) {
7169 rp.rssi = conn->rssi;
7170 rp.tx_power = conn->tx_power;
7171 rp.max_tx_power = conn->max_tx_power;
7172 } else {
7173 rp.rssi = HCI_RSSI_INVALID;
7174 rp.tx_power = HCI_TX_POWER_INVALID;
7175 rp.max_tx_power = HCI_TX_POWER_INVALID;
7176 }
7177
7178 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_GET_CONN_INFO, status,
7179 &rp, sizeof(rp));
7180
7181 mgmt_pending_free(cmd);
7182 }
7183
7184 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7185 {
7186 struct mgmt_pending_cmd *cmd = data;
7187 struct mgmt_cp_get_conn_info *cp = cmd->param;
7188 struct hci_conn *conn;
7189 int err;
7190 __le16 handle;
7191
7192 /* Make sure we are still connected */
7193 if (cp->addr.type == BDADDR_BREDR)
7194 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7195 &cp->addr.bdaddr);
7196 else
7197 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7198
7199 if (!conn || conn->state != BT_CONNECTED)
7200 return MGMT_STATUS_NOT_CONNECTED;
7201
7202 cmd->user_data = conn;
7203 handle = cpu_to_le16(conn->handle);
7204
7205 /* Refresh RSSI each time */
7206 err = hci_read_rssi_sync(hdev, handle);
7207
7208 /* For LE links the TX power does not change, thus we don't need
7209 * to query for it once its value is known.
7210 */
7211 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7212 conn->tx_power == HCI_TX_POWER_INVALID))
7213 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7214
7215 /* Max TX power needs to be read only once per connection */
7216 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7217 err = hci_read_tx_power_sync(hdev, handle, 0x01);
7218
7219 return err;
7220 }
7221
7222 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7223 u16 len)
7224 {
7225 struct mgmt_cp_get_conn_info *cp = data;
7226 struct mgmt_rp_get_conn_info rp;
7227 struct hci_conn *conn;
7228 unsigned long conn_info_age;
7229 int err = 0;
7230
7231 bt_dev_dbg(hdev, "sock %p", sk);
7232
7233 memset(&rp, 0, sizeof(rp));
7234 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7235 rp.addr.type = cp->addr.type;
7236
7237 if (!bdaddr_type_is_valid(cp->addr.type))
7238 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7239 MGMT_STATUS_INVALID_PARAMS,
7240 &rp, sizeof(rp));
7241
7242 hci_dev_lock(hdev);
7243
7244 if (!hdev_is_powered(hdev)) {
7245 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7246 MGMT_STATUS_NOT_POWERED, &rp,
7247 sizeof(rp));
7248 goto unlock;
7249 }
7250
7251 if (cp->addr.type == BDADDR_BREDR)
7252 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7253 &cp->addr.bdaddr);
7254 else
7255 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7256
7257 if (!conn || conn->state != BT_CONNECTED) {
7258 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7259 MGMT_STATUS_NOT_CONNECTED, &rp,
7260 sizeof(rp));
7261 goto unlock;
7262 }
7263
7264 /* To avoid the client trying to guess when to poll again, calculate
7265 * the conn info age as a random value between the min/max set in hdev.
7266 */
7267 conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
7268 hdev->conn_info_max_age - 1);
7269
7270 /* Query controller to refresh cached values if they are too old or were
7271 * never read.
7272 */
7273 if (time_after(jiffies, conn->conn_info_timestamp +
7274 msecs_to_jiffies(conn_info_age)) ||
7275 !conn->conn_info_timestamp) {
7276 struct mgmt_pending_cmd *cmd;
7277
7278 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7279 len);
7280 if (!cmd) {
7281 err = -ENOMEM;
7282 } else {
7283 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7284 cmd, get_conn_info_complete);
7285 }
7286
7287 if (err < 0) {
7288 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7289 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7290
7291 if (cmd)
7292 mgmt_pending_free(cmd);
7293
7294 goto unlock;
7295 }
7296
7297 conn->conn_info_timestamp = jiffies;
7298 } else {
7299 /* Cache is valid, just reply with values cached in hci_conn */
7300 rp.rssi = conn->rssi;
7301 rp.tx_power = conn->tx_power;
7302 rp.max_tx_power = conn->max_tx_power;
7303
7304 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7305 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7306 }
7307
7308 unlock:
7309 hci_dev_unlock(hdev);
7310 return err;
7311 }
7312
7313 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7314 {
7315 struct mgmt_pending_cmd *cmd = data;
7316 struct mgmt_cp_get_clock_info *cp = cmd->param;
7317 struct mgmt_rp_get_clock_info rp;
7318 struct hci_conn *conn = cmd->user_data;
7319 u8 status = mgmt_status(err);
7320
7321 bt_dev_dbg(hdev, "err %d", err);
7322
7323 memset(&rp, 0, sizeof(rp));
7324 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7325 rp.addr.type = cp->addr.type;
7326
7327 if (err)
7328 goto complete;
7329
7330 rp.local_clock = cpu_to_le32(hdev->clock);
7331
7332 if (conn) {
7333 rp.piconet_clock = cpu_to_le32(conn->clock);
7334 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7335 }
7336
7337 complete:
7338 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status, &rp,
7339 sizeof(rp));
7340
7341 mgmt_pending_free(cmd);
7342 }
7343
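/* Read the local clock first (which = 0x00 after the memset) and, if the
 * connection is still up, the piconet clock for its handle as well.
 */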
7344 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7345 {
7346 struct mgmt_pending_cmd *cmd = data;
7347 struct mgmt_cp_get_clock_info *cp = cmd->param;
7348 struct hci_cp_read_clock hci_cp;
7349 struct hci_conn *conn;
7350
7351 memset(&hci_cp, 0, sizeof(hci_cp));
7352 hci_read_clock_sync(hdev, &hci_cp);
7353
7354 /* Make sure connection still exists */
7355 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7356 if (!conn || conn->state != BT_CONNECTED)
7357 return MGMT_STATUS_NOT_CONNECTED;
7358
7359 cmd->user_data = conn;
7360 hci_cp.handle = cpu_to_le16(conn->handle);
7361 hci_cp.which = 0x01; /* Piconet clock */
7362
7363 return hci_read_clock_sync(hdev, &hci_cp);
7364 }
7365
7366 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7367 u16 len)
7368 {
7369 struct mgmt_cp_get_clock_info *cp = data;
7370 struct mgmt_rp_get_clock_info rp;
7371 struct mgmt_pending_cmd *cmd;
7372 struct hci_conn *conn;
7373 int err;
7374
7375 bt_dev_dbg(hdev, "sock %p", sk);
7376
7377 memset(&rp, 0, sizeof(rp));
7378 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7379 rp.addr.type = cp->addr.type;
7380
7381 if (cp->addr.type != BDADDR_BREDR)
7382 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7383 MGMT_STATUS_INVALID_PARAMS,
7384 &rp, sizeof(rp));
7385
7386 hci_dev_lock(hdev);
7387
7388 if (!hdev_is_powered(hdev)) {
7389 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7390 MGMT_STATUS_NOT_POWERED, &rp,
7391 sizeof(rp));
7392 goto unlock;
7393 }
7394
7395 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7396 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7397 &cp->addr.bdaddr);
7398 if (!conn || conn->state != BT_CONNECTED) {
7399 err = mgmt_cmd_complete(sk, hdev->id,
7400 MGMT_OP_GET_CLOCK_INFO,
7401 MGMT_STATUS_NOT_CONNECTED,
7402 &rp, sizeof(rp));
7403 goto unlock;
7404 }
7405 } else {
7406 conn = NULL;
7407 }
7408
7409 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7410 if (!cmd)
7411 err = -ENOMEM;
7412 else
7413 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7414 get_clock_info_complete);
7415
7416 if (err < 0) {
7417 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7418 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7419
7420 if (cmd)
7421 mgmt_pending_free(cmd);
7422 }
7423
7425 unlock:
7426 hci_dev_unlock(hdev);
7427 return err;
7428 }
7429
7430 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7431 {
7432 struct hci_conn *conn;
7433
7434 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7435 if (!conn)
7436 return false;
7437
7438 if (conn->dst_type != type)
7439 return false;
7440
7441 if (conn->state != BT_CONNECTED)
7442 return false;
7443
7444 return true;
7445 }
7446
7447 /* This function requires the caller holds hdev->lock */
7448 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7449 u8 addr_type, u8 auto_connect)
7450 {
7451 struct hci_conn_params *params;
7452
7453 params = hci_conn_params_add(hdev, addr, addr_type);
7454 if (!params)
7455 return -EIO;
7456
7457 if (params->auto_connect == auto_connect)
7458 return 0;
7459
7460 hci_pend_le_list_del_init(params);
7461
7462 switch (auto_connect) {
7463 case HCI_AUTO_CONN_DISABLED:
7464 case HCI_AUTO_CONN_LINK_LOSS:
7465 /* If auto connect is being disabled while we're trying to
7466 * connect to the device, keep connecting.
7467 */
7468 if (params->explicit_connect)
7469 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7470 break;
7471 case HCI_AUTO_CONN_REPORT:
7472 if (params->explicit_connect)
7473 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7474 else
7475 hci_pend_le_list_add(params, &hdev->pend_le_reports);
7476 break;
7477 case HCI_AUTO_CONN_DIRECT:
7478 case HCI_AUTO_CONN_ALWAYS:
7479 if (!is_connected(hdev, addr, addr_type))
7480 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7481 break;
7482 }
7483
7484 params->auto_connect = auto_connect;
7485
7486 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7487 addr, addr_type, auto_connect);
7488
7489 return 0;
7490 }
7491
7492 static void device_added(struct sock *sk, struct hci_dev *hdev,
7493 bdaddr_t *bdaddr, u8 type, u8 action)
7494 {
7495 struct mgmt_ev_device_added ev;
7496
7497 bacpy(&ev.addr.bdaddr, bdaddr);
7498 ev.addr.type = type;
7499 ev.action = action;
7500
7501 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7502 }
7503
7504 static void add_device_complete(struct hci_dev *hdev, void *data, int err)
7505 {
7506 struct mgmt_pending_cmd *cmd = data;
7507 struct mgmt_cp_add_device *cp = cmd->param;
7508
7509 if (!err) {
7510 struct hci_conn_params *params;
7511
7512 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7513 le_addr_type(cp->addr.type));
7514
7515 device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
7516 cp->action);
7517 device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
7518 cp->addr.type, hdev->conn_flags,
7519 params ? params->flags : 0);
7520 }
7521
7522 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
7523 mgmt_status(err), &cp->addr, sizeof(cp->addr));
7524 mgmt_pending_free(cmd);
7525 }
7526
7527 static int add_device_sync(struct hci_dev *hdev, void *data)
7528 {
7529 return hci_update_passive_scan_sync(hdev);
7530 }
7531
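/* Add Device action values: 0x00 scan and report (background scan), 0x01
 * allow incoming connections, 0x02 auto-connect. For BR/EDR addresses only
 * action 0x01 is supported and the device is put on the accept list; for
 * LE addresses the action maps onto an auto connect policy.
 */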
7532 static int add_device(struct sock *sk, struct hci_dev *hdev,
7533 void *data, u16 len)
7534 {
7535 struct mgmt_pending_cmd *cmd;
7536 struct mgmt_cp_add_device *cp = data;
7537 u8 auto_conn, addr_type;
7538 struct hci_conn_params *params;
7539 int err;
7540 u32 current_flags = 0;
7541 u32 supported_flags;
7542
7543 bt_dev_dbg(hdev, "sock %p", sk);
7544
7545 if (!bdaddr_type_is_valid(cp->addr.type) ||
7546 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7547 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7548 MGMT_STATUS_INVALID_PARAMS,
7549 &cp->addr, sizeof(cp->addr));
7550
7551 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7552 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7553 MGMT_STATUS_INVALID_PARAMS,
7554 &cp->addr, sizeof(cp->addr));
7555
7556 hci_dev_lock(hdev);
7557
7558 if (cp->addr.type == BDADDR_BREDR) {
7559 /* Only the incoming-connections action is supported for now */
7560 if (cp->action != 0x01) {
7561 err = mgmt_cmd_complete(sk, hdev->id,
7562 MGMT_OP_ADD_DEVICE,
7563 MGMT_STATUS_INVALID_PARAMS,
7564 &cp->addr, sizeof(cp->addr));
7565 goto unlock;
7566 }
7567
7568 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7569 &cp->addr.bdaddr,
7570 cp->addr.type, 0);
7571 if (err)
7572 goto unlock;
7573
7574 hci_update_scan(hdev);
7575
7576 goto added;
7577 }
7578
7579 addr_type = le_addr_type(cp->addr.type);
7580
7581 if (cp->action == 0x02)
7582 auto_conn = HCI_AUTO_CONN_ALWAYS;
7583 else if (cp->action == 0x01)
7584 auto_conn = HCI_AUTO_CONN_DIRECT;
7585 else
7586 auto_conn = HCI_AUTO_CONN_REPORT;
7587
7588 /* The kernel internally uses conn_params with the resolvable
7589 * private address, but Add Device allows only identity
7590 * addresses. Make sure this is enforced before calling
7591 * hci_conn_params_lookup.
7592 */
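/* (An identity address here is either a public address or a static
 * random address, i.e. a random address whose two most significant bits
 * are both set; RPAs and non-resolvable private addresses do not
 * qualify.)
 */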
7593 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7594 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7595 MGMT_STATUS_INVALID_PARAMS,
7596 &cp->addr, sizeof(cp->addr));
7597 goto unlock;
7598 }
7599
7600 /* If the connection parameters don't exist for this device,
7601 * they will be created and configured with defaults.
7602 */
7603 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7604 auto_conn) < 0) {
7605 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7606 MGMT_STATUS_FAILED, &cp->addr,
7607 sizeof(cp->addr));
7608 goto unlock;
7609 } else {
7610 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7611 addr_type);
7612 if (params)
7613 current_flags = params->flags;
7614 }
7615
7616 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
7617 if (!cmd) {
7618 err = -ENOMEM;
7619 goto unlock;
7620 }
7621
7622 err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
7623 add_device_complete);
7624 if (err < 0) {
7625 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7626 MGMT_STATUS_FAILED, &cp->addr,
7627 sizeof(cp->addr));
7628 mgmt_pending_free(cmd);
7629 }
7630
7631 goto unlock;
7632
7633 added:
7634 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7635 supported_flags = hdev->conn_flags;
7636 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7637 supported_flags, current_flags);
7638
7639 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7640 MGMT_STATUS_SUCCESS, &cp->addr,
7641 sizeof(cp->addr));
7642
7643 unlock:
7644 hci_dev_unlock(hdev);
7645 return err;
7646 }
7647
7648 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7649 bdaddr_t *bdaddr, u8 type)
7650 {
7651 struct mgmt_ev_device_removed ev;
7652
7653 bacpy(&ev.addr.bdaddr, bdaddr);
7654 ev.addr.type = type;
7655
7656 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7657 }
7658
7659 static int remove_device_sync(struct hci_dev *hdev, void *data)
7660 {
7661 return hci_update_passive_scan_sync(hdev);
7662 }
7663
7664 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7665 void *data, u16 len)
7666 {
7667 struct mgmt_cp_remove_device *cp = data;
7668 int err;
7669
7670 bt_dev_dbg(hdev, "sock %p", sk);
7671
7672 hci_dev_lock(hdev);
7673
7674 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7675 struct hci_conn_params *params;
7676 u8 addr_type;
7677
7678 if (!bdaddr_type_is_valid(cp->addr.type)) {
7679 err = mgmt_cmd_complete(sk, hdev->id,
7680 MGMT_OP_REMOVE_DEVICE,
7681 MGMT_STATUS_INVALID_PARAMS,
7682 &cp->addr, sizeof(cp->addr));
7683 goto unlock;
7684 }
7685
7686 if (cp->addr.type == BDADDR_BREDR) {
7687 err = hci_bdaddr_list_del(&hdev->accept_list,
7688 &cp->addr.bdaddr,
7689 cp->addr.type);
7690 if (err) {
7691 err = mgmt_cmd_complete(sk, hdev->id,
7692 MGMT_OP_REMOVE_DEVICE,
7693 MGMT_STATUS_INVALID_PARAMS,
7694 &cp->addr,
7695 sizeof(cp->addr));
7696 goto unlock;
7697 }
7698
7699 hci_update_scan(hdev);
7700
7701 device_removed(sk, hdev, &cp->addr.bdaddr,
7702 cp->addr.type);
7703 goto complete;
7704 }
7705
7706 addr_type = le_addr_type(cp->addr.type);
7707
7708 /* The kernel internally uses conn_params with the resolvable
7709 * private address, but Remove Device allows only identity
7710 * addresses. Make sure this is enforced before calling
7711 * hci_conn_params_lookup.
7712 */
7713 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7714 err = mgmt_cmd_complete(sk, hdev->id,
7715 MGMT_OP_REMOVE_DEVICE,
7716 MGMT_STATUS_INVALID_PARAMS,
7717 &cp->addr, sizeof(cp->addr));
7718 goto unlock;
7719 }
7720
7721 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7722 addr_type);
7723 if (!params) {
7724 err = mgmt_cmd_complete(sk, hdev->id,
7725 MGMT_OP_REMOVE_DEVICE,
7726 MGMT_STATUS_INVALID_PARAMS,
7727 &cp->addr, sizeof(cp->addr));
7728 goto unlock;
7729 }
7730
7731 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7732 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7733 err = mgmt_cmd_complete(sk, hdev->id,
7734 MGMT_OP_REMOVE_DEVICE,
7735 MGMT_STATUS_INVALID_PARAMS,
7736 &cp->addr, sizeof(cp->addr));
7737 goto unlock;
7738 }
7739
7740 hci_conn_params_free(params);
7741
7742 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7743 } else {
7744 struct hci_conn_params *p, *tmp;
7745 struct bdaddr_list *b, *btmp;
7746
7747 if (cp->addr.type) {
7748 err = mgmt_cmd_complete(sk, hdev->id,
7749 MGMT_OP_REMOVE_DEVICE,
7750 MGMT_STATUS_INVALID_PARAMS,
7751 &cp->addr, sizeof(cp->addr));
7752 goto unlock;
7753 }
7754
7755 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7756 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7757 list_del(&b->list);
7758 kfree(b);
7759 }
7760
7761 hci_update_scan(hdev);
7762
7763 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7764 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7765 continue;
7766 device_removed(sk, hdev, &p->addr, p->addr_type);
7767 if (p->explicit_connect) {
7768 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7769 continue;
7770 }
7771 hci_conn_params_free(p);
7772 }
7773
7774 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7775 }
7776
7777 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7778
7779 complete:
7780 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7781 MGMT_STATUS_SUCCESS, &cp->addr,
7782 sizeof(cp->addr));
7783 unlock:
7784 hci_dev_unlock(hdev);
7785 return err;
7786 }
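/* Usage sketch (illustrative only, not part of the kernel build):
 * passing BDADDR_ANY with addr.type 0x00 takes the wildcard branch
 * above, clearing the whole accept list and every enabled LE
 * conn_params entry (explicitly connecting entries are retained as
 * HCI_AUTO_CONN_EXPLICIT):
 *
 *	struct mgmt_cp_remove_device cp;
 *
 *	memset(&cp, 0, sizeof(cp));	// BDADDR_ANY, type 0x00
 *	mgmt_send(MGMT_OP_REMOVE_DEVICE, index, &cp, sizeof(cp));
 *
 * where mgmt_send() is a hypothetical stand-in for however the caller
 * writes mgmt commands to its HCI control socket.
 */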
7787
7788 static int conn_update_sync(struct hci_dev *hdev, void *data)
7789 {
7790 struct hci_conn_params *params = data;
7791 struct hci_conn *conn;
7792
7793 conn = hci_conn_hash_lookup_le(hdev, &params->addr, params->addr_type);
7794 if (!conn)
7795 return -ECANCELED;
7796
7797 return hci_le_conn_update_sync(hdev, conn, params);
7798 }
7799
7800 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7801 u16 len)
7802 {
7803 struct mgmt_cp_load_conn_param *cp = data;
7804 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7805 sizeof(struct mgmt_conn_param));
7806 u16 param_count, expected_len;
7807 int i;
7808
7809 if (!lmp_le_capable(hdev))
7810 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7811 MGMT_STATUS_NOT_SUPPORTED);
7812
7813 param_count = __le16_to_cpu(cp->param_count);
7814 if (param_count > max_param_count) {
7815 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7816 param_count);
7817 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7818 MGMT_STATUS_INVALID_PARAMS);
7819 }
7820
7821 expected_len = struct_size(cp, params, param_count);
7822 if (expected_len != len) {
7823 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7824 expected_len, len);
7825 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7826 MGMT_STATUS_INVALID_PARAMS);
7827 }
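/* Wire layout being validated above (from the shape of the mgmt
 * interface definitions): a fixed header followed by param_count
 * entries,
 *
 *	struct mgmt_cp_load_conn_param {
 *		__le16 param_count;
 *		struct mgmt_conn_param params[];
 *	} __packed;
 *
 * so the only acceptable length is struct_size(cp, params, param_count).
 */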
7828
7829 bt_dev_dbg(hdev, "param_count %u", param_count);
7830
7831 hci_dev_lock(hdev);
7832
7833 if (param_count > 1)
7834 hci_conn_params_clear_disabled(hdev);
7835
7836 for (i = 0; i < param_count; i++) {
7837 struct mgmt_conn_param *param = &cp->params[i];
7838 struct hci_conn_params *hci_param;
7839 u16 min, max, latency, timeout;
7840 bool update = false;
7841 u8 addr_type;
7842
7843 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
7844 param->addr.type);
7845
7846 if (param->addr.type == BDADDR_LE_PUBLIC) {
7847 addr_type = ADDR_LE_DEV_PUBLIC;
7848 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7849 addr_type = ADDR_LE_DEV_RANDOM;
7850 } else {
7851 bt_dev_err(hdev, "ignoring invalid connection parameters");
7852 continue;
7853 }
7854
7855 min = le16_to_cpu(param->min_interval);
7856 max = le16_to_cpu(param->max_interval);
7857 latency = le16_to_cpu(param->latency);
7858 timeout = le16_to_cpu(param->timeout);
7859
7860 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7861 min, max, latency, timeout);
7862
7863 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7864 bt_dev_err(hdev, "ignoring invalid connection parameters");
7865 continue;
7866 }
7867
7868 /* Detect when the load is for an existing parameter, then
7869 * attempt to trigger the connection update procedure.
7870 */
7871 if (!i && param_count == 1) {
7872 hci_param = hci_conn_params_lookup(hdev,
7873 &param->addr.bdaddr,
7874 addr_type);
7875 if (hci_param)
7876 update = true;
7877 else
7878 hci_conn_params_clear_disabled(hdev);
7879 }
7880
7881 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7882 addr_type);
7883 if (!hci_param) {
7884 bt_dev_err(hdev, "failed to add connection parameters");
7885 continue;
7886 }
7887
7888 hci_param->conn_min_interval = min;
7889 hci_param->conn_max_interval = max;
7890 hci_param->conn_latency = latency;
7891 hci_param->supervision_timeout = timeout;
7892
7893 /* Check if we need to trigger a connection update */
7894 if (update) {
7895 struct hci_conn *conn;
7896
7897 /* Look up an existing connection as central and check
7898 * whether the parameters match; if they don't, trigger
7899 * a connection update.
7900 */
7901 conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
7902 addr_type);
7903 if (conn && conn->role == HCI_ROLE_MASTER &&
7904 (conn->le_conn_min_interval != min ||
7905 conn->le_conn_max_interval != max ||
7906 conn->le_conn_latency != latency ||
7907 conn->le_supv_timeout != timeout))
7908 hci_cmd_sync_queue(hdev, conn_update_sync,
7909 hci_param, NULL);
7910 }
7911 }
7912
7913 hci_dev_unlock(hdev);
7914
7915 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
7916 NULL, 0);
7917 }
7918
7919 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7920 void *data, u16 len)
7921 {
7922 struct mgmt_cp_set_external_config *cp = data;
7923 bool changed;
7924 int err;
7925
7926 bt_dev_dbg(hdev, "sock %p", sk);
7927
7928 if (hdev_is_powered(hdev))
7929 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7930 MGMT_STATUS_REJECTED);
7931
7932 if (cp->config != 0x00 && cp->config != 0x01)
7933 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7934 MGMT_STATUS_INVALID_PARAMS);
7935
7936 if (!hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG))
7937 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7938 MGMT_STATUS_NOT_SUPPORTED);
7939
7940 hci_dev_lock(hdev);
7941
7942 if (cp->config)
7943 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7944 else
7945 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7946
7947 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7948 if (err < 0)
7949 goto unlock;
7950
7951 if (!changed)
7952 goto unlock;
7953
7954 err = new_options(hdev, sk);
7955
7956 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7957 mgmt_index_removed(hdev);
7958
7959 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7960 hci_dev_set_flag(hdev, HCI_CONFIG);
7961 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7962
7963 queue_work(hdev->req_workqueue, &hdev->power_on);
7964 } else {
7965 set_bit(HCI_RAW, &hdev->flags);
7966 mgmt_index_added(hdev);
7967 }
7968 }
7969
7970 unlock:
7971 hci_dev_unlock(hdev);
7972 return err;
7973 }
7974
7975 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
7976 void *data, u16 len)
7977 {
7978 struct mgmt_cp_set_public_address *cp = data;
7979 bool changed;
7980 int err;
7981
7982 bt_dev_dbg(hdev, "sock %p", sk);
7983
7984 if (hdev_is_powered(hdev))
7985 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7986 MGMT_STATUS_REJECTED);
7987
7988 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
7989 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7990 MGMT_STATUS_INVALID_PARAMS);
7991
7992 if (!hdev->set_bdaddr)
7993 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7994 MGMT_STATUS_NOT_SUPPORTED);
7995
7996 hci_dev_lock(hdev);
7997
7998 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
7999 bacpy(&hdev->public_addr, &cp->bdaddr);
8000
8001 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8002 if (err < 0)
8003 goto unlock;
8004
8005 if (!changed)
8006 goto unlock;
8007
8008 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8009 err = new_options(hdev, sk);
8010
8011 if (is_configured(hdev)) {
8012 mgmt_index_removed(hdev);
8013
8014 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8015
8016 hci_dev_set_flag(hdev, HCI_CONFIG);
8017 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8018
8019 queue_work(hdev->req_workqueue, &hdev->power_on);
8020 }
8021
8022 unlock:
8023 hci_dev_unlock(hdev);
8024 return err;
8025 }
8026
8027 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8028 int err)
8029 {
8030 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8031 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8032 u8 *h192, *r192, *h256, *r256;
8033 struct mgmt_pending_cmd *cmd = data;
8034 struct sk_buff *skb = cmd->skb;
8035 u8 status = mgmt_status(err);
8036 u16 eir_len;
8037
8038 if (err == -ECANCELED ||
8039 cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
8040 return;
8041
8042 if (!status) {
8043 if (!skb)
8044 status = MGMT_STATUS_FAILED;
8045 else if (IS_ERR(skb))
8046 status = mgmt_status(PTR_ERR(skb));
8047 else
8048 status = mgmt_status(skb->data[0]);
8049 }
8050
8051 bt_dev_dbg(hdev, "status %u", status);
8052
8053 mgmt_cp = cmd->param;
8054
8055 if (status) {
8056 status = mgmt_status(status);
8057 eir_len = 0;
8058
8059 h192 = NULL;
8060 r192 = NULL;
8061 h256 = NULL;
8062 r256 = NULL;
8063 } else if (!bredr_sc_enabled(hdev)) {
8064 struct hci_rp_read_local_oob_data *rp;
8065
8066 if (skb->len != sizeof(*rp)) {
8067 status = MGMT_STATUS_FAILED;
8068 eir_len = 0;
8069 } else {
8070 status = MGMT_STATUS_SUCCESS;
8071 rp = (void *)skb->data;
8072
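/* Class of Device (2-byte EIR header plus 3-byte value) and the
 * C192 hash and R192 randomizer (2 + 16 bytes each).
 */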
8073 eir_len = 5 + 18 + 18;
8074 h192 = rp->hash;
8075 r192 = rp->rand;
8076 h256 = NULL;
8077 r256 = NULL;
8078 }
8079 } else {
8080 struct hci_rp_read_local_oob_ext_data *rp;
8081
8082 if (skb->len != sizeof(*rp)) {
8083 status = MGMT_STATUS_FAILED;
8084 eir_len = 0;
8085 } else {
8086 status = MGMT_STATUS_SUCCESS;
8087 rp = (void *)skb->data;
8088
8089 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8090 eir_len = 5 + 18 + 18;
8091 h192 = NULL;
8092 r192 = NULL;
8093 } else {
8094 eir_len = 5 + 18 + 18 + 18 + 18;
8095 h192 = rp->hash192;
8096 r192 = rp->rand192;
8097 }
8098
8099 h256 = rp->hash256;
8100 r256 = rp->rand256;
8101 }
8102 }
8103
8104 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8105 if (!mgmt_rp)
8106 goto done;
8107
8108 if (eir_len == 0)
8109 goto send_rsp;
8110
8111 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8112 hdev->dev_class, 3);
8113
8114 if (h192 && r192) {
8115 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8116 EIR_SSP_HASH_C192, h192, 16);
8117 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8118 EIR_SSP_RAND_R192, r192, 16);
8119 }
8120
8121 if (h256 && r256) {
8122 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8123 EIR_SSP_HASH_C256, h256, 16);
8124 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8125 EIR_SSP_RAND_R256, r256, 16);
8126 }
8127
8128 send_rsp:
8129 mgmt_rp->type = mgmt_cp->type;
8130 mgmt_rp->eir_len = cpu_to_le16(eir_len);
8131
8132 err = mgmt_cmd_complete(cmd->sk, hdev->id,
8133 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8134 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8135 if (err < 0 || status)
8136 goto done;
8137
8138 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8139
8140 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8141 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8142 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8143 done:
8144 if (skb && !IS_ERR(skb))
8145 kfree_skb(skb);
8146
8147 kfree(mgmt_rp);
8148 mgmt_pending_remove(cmd);
8149 }
8150
8151 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8152 struct mgmt_cp_read_local_oob_ext_data *cp)
8153 {
8154 struct mgmt_pending_cmd *cmd;
8155 int err;
8156
8157 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8158 cp, sizeof(*cp));
8159 if (!cmd)
8160 return -ENOMEM;
8161
8162 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8163 read_local_oob_ext_data_complete);
8164
8165 if (err < 0) {
8166 mgmt_pending_remove(cmd);
8167 return err;
8168 }
8169
8170 return 0;
8171 }
8172
8173 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8174 void *data, u16 data_len)
8175 {
8176 struct mgmt_cp_read_local_oob_ext_data *cp = data;
8177 struct mgmt_rp_read_local_oob_ext_data *rp;
8178 size_t rp_len;
8179 u16 eir_len;
8180 u8 status, flags, role, addr[7], hash[16], rand[16];
8181 int err;
8182
8183 bt_dev_dbg(hdev, "sock %p", sk);
8184
8185 if (hdev_is_powered(hdev)) {
8186 switch (cp->type) {
8187 case BIT(BDADDR_BREDR):
8188 status = mgmt_bredr_support(hdev);
8189 if (status)
8190 eir_len = 0;
8191 else
8192 eir_len = 5;
8193 break;
8194 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8195 status = mgmt_le_support(hdev);
8196 if (status)
8197 eir_len = 0;
8198 else
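/* LE bdaddr (2 + 7) + role (2 + 1) + SC confirm and
 * random (2 + 16 each) + flags (2 + 1)
 */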
8199 eir_len = 9 + 3 + 18 + 18 + 3;
8200 break;
8201 default:
8202 status = MGMT_STATUS_INVALID_PARAMS;
8203 eir_len = 0;
8204 break;
8205 }
8206 } else {
8207 status = MGMT_STATUS_NOT_POWERED;
8208 eir_len = 0;
8209 }
8210
8211 rp_len = sizeof(*rp) + eir_len;
8212 rp = kmalloc(rp_len, GFP_ATOMIC);
8213 if (!rp)
8214 return -ENOMEM;
8215
8216 if (!status && !lmp_ssp_capable(hdev)) {
8217 status = MGMT_STATUS_NOT_SUPPORTED;
8218 eir_len = 0;
8219 }
8220
8221 if (status)
8222 goto complete;
8223
8224 hci_dev_lock(hdev);
8225
8226 eir_len = 0;
8227 switch (cp->type) {
8228 case BIT(BDADDR_BREDR):
8229 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8230 err = read_local_ssp_oob_req(hdev, sk, cp);
8231 hci_dev_unlock(hdev);
8232 if (!err)
8233 goto done;
8234
8235 status = MGMT_STATUS_FAILED;
8236 goto complete;
8237 } else {
8238 eir_len = eir_append_data(rp->eir, eir_len,
8239 EIR_CLASS_OF_DEV,
8240 hdev->dev_class, 3);
8241 }
8242 break;
8243 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8244 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8245 smp_generate_oob(hdev, hash, rand) < 0) {
8246 hci_dev_unlock(hdev);
8247 status = MGMT_STATUS_FAILED;
8248 goto complete;
8249 }
8250
8251 /* This should return the active RPA, but since the RPA
8252 * is only programmed on demand, it is really hard to fill
8253 * this in at the moment. For now, disallow retrieving
8254 * local out-of-band data when privacy is in use.
8255 *
8256 * Returning the identity address will not help here since
8257 * pairing happens before the identity resolving key is
8258 * known and thus the connection establishment happens
8259 * based on the RPA and not the identity address.
8260 */
8261 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8262 hci_dev_unlock(hdev);
8263 status = MGMT_STATUS_REJECTED;
8264 goto complete;
8265 }
8266
8267 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8268 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8269 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8270 bacmp(&hdev->static_addr, BDADDR_ANY))) {
8271 memcpy(addr, &hdev->static_addr, 6);
8272 addr[6] = 0x01;
8273 } else {
8274 memcpy(addr, &hdev->bdaddr, 6);
8275 addr[6] = 0x00;
8276 }
8277
8278 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8279 addr, sizeof(addr));
8280
8281 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8282 role = 0x02;
8283 else
8284 role = 0x01;
8285
8286 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8287 &role, sizeof(role));
8288
8289 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8290 eir_len = eir_append_data(rp->eir, eir_len,
8291 EIR_LE_SC_CONFIRM,
8292 hash, sizeof(hash));
8293
8294 eir_len = eir_append_data(rp->eir, eir_len,
8295 EIR_LE_SC_RANDOM,
8296 rand, sizeof(rand));
8297 }
8298
8299 flags = mgmt_get_adv_discov_flags(hdev);
8300
8301 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8302 flags |= LE_AD_NO_BREDR;
8303
8304 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8305 &flags, sizeof(flags));
8306 break;
8307 }
8308
8309 hci_dev_unlock(hdev);
8310
8311 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8312
8313 status = MGMT_STATUS_SUCCESS;
8314
8315 complete:
8316 rp->type = cp->type;
8317 rp->eir_len = cpu_to_le16(eir_len);
8318
8319 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8320 status, rp, sizeof(*rp) + eir_len);
8321 if (err < 0 || status)
8322 goto done;
8323
8324 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8325 rp, sizeof(*rp) + eir_len,
8326 HCI_MGMT_OOB_DATA_EVENTS, sk);
8327
8328 done:
8329 kfree(rp);
8330
8331 return err;
8332 }
8333
8334 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8335 {
8336 u32 flags = 0;
8337
8338 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8339 flags |= MGMT_ADV_FLAG_DISCOV;
8340 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8341 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8342 flags |= MGMT_ADV_FLAG_APPEARANCE;
8343 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8344 flags |= MGMT_ADV_PARAM_DURATION;
8345 flags |= MGMT_ADV_PARAM_TIMEOUT;
8346 flags |= MGMT_ADV_PARAM_INTERVALS;
8347 flags |= MGMT_ADV_PARAM_TX_POWER;
8348 flags |= MGMT_ADV_PARAM_SCAN_RSP;
8349
8350 /* In extended advertising, the TX_POWER returned from Set Adv Param
8351 * will always be valid.
8352 */
8353 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8354 flags |= MGMT_ADV_FLAG_TX_POWER;
8355
8356 if (ext_adv_capable(hdev)) {
8357 flags |= MGMT_ADV_FLAG_SEC_1M;
8358 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8359 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8360
8361 if (le_2m_capable(hdev))
8362 flags |= MGMT_ADV_FLAG_SEC_2M;
8363
8364 if (le_coded_capable(hdev))
8365 flags |= MGMT_ADV_FLAG_SEC_CODED;
8366 }
8367
8368 return flags;
8369 }
8370
8371 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8372 void *data, u16 data_len)
8373 {
8374 struct mgmt_rp_read_adv_features *rp;
8375 size_t rp_len;
8376 int err;
8377 struct adv_info *adv_instance;
8378 u32 supported_flags;
8379 u8 *instance;
8380
8381 bt_dev_dbg(hdev, "sock %p", sk);
8382
8383 if (!lmp_le_capable(hdev))
8384 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8385 MGMT_STATUS_REJECTED);
8386
8387 hci_dev_lock(hdev);
8388
8389 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8390 rp = kmalloc(rp_len, GFP_ATOMIC);
8391 if (!rp) {
8392 hci_dev_unlock(hdev);
8393 return -ENOMEM;
8394 }
8395
8396 supported_flags = get_supported_adv_flags(hdev);
8397
8398 rp->supported_flags = cpu_to_le32(supported_flags);
8399 rp->max_adv_data_len = max_adv_len(hdev);
8400 rp->max_scan_rsp_len = max_adv_len(hdev);
8401 rp->max_instances = hdev->le_num_of_adv_sets;
8402 rp->num_instances = hdev->adv_instance_cnt;
8403
8404 instance = rp->instance;
8405 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8406 /* Only instances 1-le_num_of_adv_sets are externally visible */
8407 if (adv_instance->instance <= hdev->adv_instance_cnt) {
8408 *instance = adv_instance->instance;
8409 instance++;
8410 } else {
8411 rp->num_instances--;
8412 rp_len--;
8413 }
8414 }
8415
8416 hci_dev_unlock(hdev);
8417
8418 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8419 MGMT_STATUS_SUCCESS, rp, rp_len);
8420
8421 kfree(rp);
8422
8423 return err;
8424 }
8425
8426 static u8 calculate_name_len(struct hci_dev *hdev)
8427 {
8428 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8429
8430 return eir_append_local_name(hdev, buf, 0);
8431 }
8432
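/* Every AD/EIR field costs a 2-byte header (length and type) on top of
 * its value: Flags and TX Power carry 1-byte values (3 bytes total
 * each), Appearance a 2-byte value (4 bytes total), and the local name
 * cost is measured via calculate_name_len() above.
 */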
8433 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8434 bool is_adv_data)
8435 {
8436 u8 max_len = max_adv_len(hdev);
8437
8438 if (is_adv_data) {
8439 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8440 MGMT_ADV_FLAG_LIMITED_DISCOV |
8441 MGMT_ADV_FLAG_MANAGED_FLAGS))
8442 max_len -= 3;
8443
8444 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8445 max_len -= 3;
8446 } else {
8447 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8448 max_len -= calculate_name_len(hdev);
8449
8450 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8451 max_len -= 4;
8452 }
8453
8454 return max_len;
8455 }
8456
8457 static bool flags_managed(u32 adv_flags)
8458 {
8459 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8460 MGMT_ADV_FLAG_LIMITED_DISCOV |
8461 MGMT_ADV_FLAG_MANAGED_FLAGS);
8462 }
8463
8464 static bool tx_power_managed(u32 adv_flags)
8465 {
8466 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8467 }
8468
8469 static bool name_managed(u32 adv_flags)
8470 {
8471 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8472 }
8473
8474 static bool appearance_managed(u32 adv_flags)
8475 {
8476 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8477 }
8478
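/* Worked example (illustrative): the byte sequence 02 01 06 03 03 aa fe
 * parses as two fields, Flags (len 0x02, type 0x01, value 0x06) and
 * 16-bit Service UUIDs (len 0x03, type 0x03, UUID 0xfeaa). As adv data
 * it is rejected whenever flags_managed() holds, since the kernel then
 * owns the Flags field.
 */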
8479 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8480 u8 len, bool is_adv_data)
8481 {
8482 int i, cur_len;
8483 u8 max_len;
8484
8485 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8486
8487 if (len > max_len)
8488 return false;
8489
8490 /* Make sure that the data is correctly formatted. */
8491 for (i = 0; i < len; i += (cur_len + 1)) {
8492 cur_len = data[i];
8493
8494 if (!cur_len)
8495 continue;
8496
8497 if (data[i + 1] == EIR_FLAGS &&
8498 (!is_adv_data || flags_managed(adv_flags)))
8499 return false;
8500
8501 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8502 return false;
8503
8504 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8505 return false;
8506
8507 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8508 return false;
8509
8510 if (data[i + 1] == EIR_APPEARANCE &&
8511 appearance_managed(adv_flags))
8512 return false;
8513
8514 /* If the current field length would exceed the total data
8515 * length, then it's invalid.
8516 */
8517 if (i + cur_len >= len)
8518 return false;
8519 }
8520
8521 return true;
8522 }
8523
8524 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8525 {
8526 u32 supported_flags, phy_flags;
8527
8528 /* The current implementation only supports a subset of the specified
8529 * flags. We also need to check the mutual exclusiveness of the sec flags.
8530 */
8531 supported_flags = get_supported_adv_flags(hdev);
8532 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
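/* phy_flags & -phy_flags isolates the lowest set bit, so the XOR
 * below is non-zero exactly when more than one secondary PHY flag
 * is set.
 */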
8533 if (adv_flags & ~supported_flags ||
8534 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8535 return false;
8536
8537 return true;
8538 }
8539
8540 static bool adv_busy(struct hci_dev *hdev)
8541 {
8542 return pending_find(MGMT_OP_SET_LE, hdev);
8543 }
8544
8545 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8546 int err)
8547 {
8548 struct adv_info *adv, *n;
8549
8550 bt_dev_dbg(hdev, "err %d", err);
8551
8552 hci_dev_lock(hdev);
8553
8554 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8555 u8 instance;
8556
8557 if (!adv->pending)
8558 continue;
8559
8560 if (!err) {
8561 adv->pending = false;
8562 continue;
8563 }
8564
8565 instance = adv->instance;
8566
8567 if (hdev->cur_adv_instance == instance)
8568 cancel_adv_timeout(hdev);
8569
8570 hci_remove_adv_instance(hdev, instance);
8571 mgmt_advertising_removed(sk, hdev, instance);
8572 }
8573
8574 hci_dev_unlock(hdev);
8575 }
8576
8577 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8578 {
8579 struct mgmt_pending_cmd *cmd = data;
8580 struct mgmt_cp_add_advertising *cp = cmd->param;
8581 struct mgmt_rp_add_advertising rp;
8582
8583 memset(&rp, 0, sizeof(rp));
8584
8585 rp.instance = cp->instance;
8586
8587 if (err)
8588 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
8589 mgmt_status(err));
8590 else
8591 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
8592 mgmt_status(err), &rp, sizeof(rp));
8593
8594 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8595
8596 mgmt_pending_free(cmd);
8597 }
8598
8599 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8600 {
8601 struct mgmt_pending_cmd *cmd = data;
8602 struct mgmt_cp_add_advertising *cp = cmd->param;
8603
8604 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8605 }
8606
8607 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8608 void *data, u16 data_len)
8609 {
8610 struct mgmt_cp_add_advertising *cp = data;
8611 struct mgmt_rp_add_advertising rp;
8612 u32 flags;
8613 u8 status;
8614 u16 timeout, duration;
8615 unsigned int prev_instance_cnt;
8616 u8 schedule_instance = 0;
8617 struct adv_info *adv, *next_instance;
8618 int err;
8619 struct mgmt_pending_cmd *cmd;
8620
8621 bt_dev_dbg(hdev, "sock %p", sk);
8622
8623 status = mgmt_le_support(hdev);
8624 if (status)
8625 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8626 status);
8627
8628 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8629 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8630 MGMT_STATUS_INVALID_PARAMS);
8631
8632 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8633 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8634 MGMT_STATUS_INVALID_PARAMS);
8635
8636 flags = __le32_to_cpu(cp->flags);
8637 timeout = __le16_to_cpu(cp->timeout);
8638 duration = __le16_to_cpu(cp->duration);
8639
8640 if (!requested_adv_flags_are_valid(hdev, flags))
8641 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8642 MGMT_STATUS_INVALID_PARAMS);
8643
8644 hci_dev_lock(hdev);
8645
8646 if (timeout && !hdev_is_powered(hdev)) {
8647 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8648 MGMT_STATUS_REJECTED);
8649 goto unlock;
8650 }
8651
8652 if (adv_busy(hdev)) {
8653 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8654 MGMT_STATUS_BUSY);
8655 goto unlock;
8656 }
8657
8658 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8659 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8660 cp->scan_rsp_len, false)) {
8661 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8662 MGMT_STATUS_INVALID_PARAMS);
8663 goto unlock;
8664 }
8665
8666 prev_instance_cnt = hdev->adv_instance_cnt;
8667
8668 adv = hci_add_adv_instance(hdev, cp->instance, flags,
8669 cp->adv_data_len, cp->data,
8670 cp->scan_rsp_len,
8671 cp->data + cp->adv_data_len,
8672 timeout, duration,
8673 HCI_ADV_TX_POWER_NO_PREFERENCE,
8674 hdev->le_adv_min_interval,
8675 hdev->le_adv_max_interval, 0);
8676 if (IS_ERR(adv)) {
8677 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8678 MGMT_STATUS_FAILED);
8679 goto unlock;
8680 }
8681
8682 /* Only trigger an advertising added event if a new instance was
8683 * actually added.
8684 */
8685 if (hdev->adv_instance_cnt > prev_instance_cnt)
8686 mgmt_advertising_added(sk, hdev, cp->instance);
8687
8688 if (hdev->cur_adv_instance == cp->instance) {
8689 /* If the currently advertised instance is being changed then
8690 * cancel the current advertising and schedule the next
8691 * instance. If there is only one instance then the overridden
8692 * advertising data will be visible right away.
8693 */
8694 cancel_adv_timeout(hdev);
8695
8696 next_instance = hci_get_next_instance(hdev, cp->instance);
8697 if (next_instance)
8698 schedule_instance = next_instance->instance;
8699 } else if (!hdev->adv_instance_timeout) {
8700 /* Immediately advertise the new instance if no other
8701 * instance is currently being advertised.
8702 */
8703 schedule_instance = cp->instance;
8704 }
8705
8706 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8707 * there is no instance to be advertised then we have no HCI
8708 * communication to make. Simply return.
8709 */
8710 if (!hdev_is_powered(hdev) ||
8711 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8712 !schedule_instance) {
8713 rp.instance = cp->instance;
8714 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8715 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8716 goto unlock;
8717 }
8718
8719 /* We're good to go, update advertising data, parameters, and start
8720 * advertising.
8721 */
8722 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8723 data_len);
8724 if (!cmd) {
8725 err = -ENOMEM;
8726 goto unlock;
8727 }
8728
8729 cp->instance = schedule_instance;
8730
8731 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8732 add_advertising_complete);
8733 if (err < 0)
8734 mgmt_pending_free(cmd);
8735
8736 unlock:
8737 hci_dev_unlock(hdev);
8738
8739 return err;
8740 }
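/* Usage sketch (illustrative only, not part of the kernel build): a
 * minimal Add Advertising request for instance 1 carrying one 16-bit
 * service UUID AD field, again using a hypothetical mgmt_send()
 * transport helper:
 *
 *	u8 buf[sizeof(struct mgmt_cp_add_advertising) + 4];
 *	struct mgmt_cp_add_advertising *cp = (void *)buf;
 *
 *	memset(buf, 0, sizeof(buf));
 *	cp->instance = 1;
 *	cp->flags = cpu_to_le32(MGMT_ADV_FLAG_CONNECTABLE);
 *	cp->adv_data_len = 4;
 *	memcpy(cp->data, "\x03\x03\xaa\xfe", 4);	// UUID 0xfeaa
 *	mgmt_send(MGMT_OP_ADD_ADVERTISING, index, buf, sizeof(buf));
 *
 * Leaving timeout and duration at zero selects the defaults, and the
 * data passes tlv_data_is_valid() since none of the managed-field
 * flags are set.
 */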
8741
8742 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8743 int err)
8744 {
8745 struct mgmt_pending_cmd *cmd = data;
8746 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8747 struct mgmt_rp_add_ext_adv_params rp;
8748 struct adv_info *adv;
8749 u32 flags;
8750
8751 BT_DBG("%s", hdev->name);
8752
8753 hci_dev_lock(hdev);
8754
8755 adv = hci_find_adv_instance(hdev, cp->instance);
8756 if (!adv)
8757 goto unlock;
8758
8759 rp.instance = cp->instance;
8760 rp.tx_power = adv->tx_power;
8761
8762 /* While we're at it, inform userspace of the available space for this
8763 * advertisement, given the flags that will be used.
8764 */
8765 flags = __le32_to_cpu(cp->flags);
8766 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8767 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8768
8769 if (err) {
8770 /* If this advertisement was previously advertising and we
8771 * failed to update it, we signal that it has been removed and
8772 * delete its structure.
8773 */
8774 if (!adv->pending)
8775 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8776
8777 hci_remove_adv_instance(hdev, cp->instance);
8778
8779 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
8780 mgmt_status(err));
8781 } else {
8782 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
8783 mgmt_status(err), &rp, sizeof(rp));
8784 }
8785
8786 unlock:
8787 mgmt_pending_free(cmd);
8788
8789 hci_dev_unlock(hdev);
8790 }
8791
8792 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8793 {
8794 struct mgmt_pending_cmd *cmd = data;
8795 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8796
8797 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8798 }
8799
8800 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8801 void *data, u16 data_len)
8802 {
8803 struct mgmt_cp_add_ext_adv_params *cp = data;
8804 struct mgmt_rp_add_ext_adv_params rp;
8805 struct mgmt_pending_cmd *cmd = NULL;
8806 struct adv_info *adv;
8807 u32 flags, min_interval, max_interval;
8808 u16 timeout, duration;
8809 u8 status;
8810 s8 tx_power;
8811 int err;
8812
8813 BT_DBG("%s", hdev->name);
8814
8815 status = mgmt_le_support(hdev);
8816 if (status)
8817 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8818 status);
8819
8820 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8821 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8822 MGMT_STATUS_INVALID_PARAMS);
8823
8824 /* The purpose of breaking add_advertising into two separate MGMT calls
8825 * for params and data is to allow more parameters to be added to this
8826 * structure in the future. For this reason, we verify that we have at
8827 * least the bare minimum structure that was known when the interface
8828 * was defined. Any extra parameters we don't know about are ignored.
8829 */
8830 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8831 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8832 MGMT_STATUS_INVALID_PARAMS);
8833
8834 flags = __le32_to_cpu(cp->flags);
8835
8836 if (!requested_adv_flags_are_valid(hdev, flags))
8837 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8838 MGMT_STATUS_INVALID_PARAMS);
8839
8840 hci_dev_lock(hdev);
8841
8842 /* In the new interface, we require the adapter to be powered to register */
8843 if (!hdev_is_powered(hdev)) {
8844 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8845 MGMT_STATUS_REJECTED);
8846 goto unlock;
8847 }
8848
8849 if (adv_busy(hdev)) {
8850 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8851 MGMT_STATUS_BUSY);
8852 goto unlock;
8853 }
8854
8855 /* Parse the defined parameters from the request; use defaults otherwise */
8856 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8857 __le16_to_cpu(cp->timeout) : 0;
8858
8859 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8860 __le16_to_cpu(cp->duration) :
8861 hdev->def_multi_adv_rotation_duration;
8862
8863 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8864 __le32_to_cpu(cp->min_interval) :
8865 hdev->le_adv_min_interval;
8866
8867 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8868 __le32_to_cpu(cp->max_interval) :
8869 hdev->le_adv_max_interval;
8870
8871 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8872 cp->tx_power :
8873 HCI_ADV_TX_POWER_NO_PREFERENCE;
8874
8875 /* Create advertising instance with no advertising or response data */
8876 adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
8877 timeout, duration, tx_power, min_interval,
8878 max_interval, 0);
8879
8880 if (IS_ERR(adv)) {
8881 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8882 MGMT_STATUS_FAILED);
8883 goto unlock;
8884 }
8885
8886 /* Submit request for advertising params if ext adv available */
8887 if (ext_adv_capable(hdev)) {
8888 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8889 data, data_len);
8890 if (!cmd) {
8891 err = -ENOMEM;
8892 hci_remove_adv_instance(hdev, cp->instance);
8893 goto unlock;
8894 }
8895
8896 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8897 add_ext_adv_params_complete);
8898 if (err < 0)
8899 mgmt_pending_free(cmd);
8900 } else {
8901 rp.instance = cp->instance;
8902 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8903 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8904 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8905 err = mgmt_cmd_complete(sk, hdev->id,
8906 MGMT_OP_ADD_EXT_ADV_PARAMS,
8907 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8908 }
8909
8910 unlock:
8911 hci_dev_unlock(hdev);
8912
8913 return err;
8914 }
8915
8916 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8917 {
8918 struct mgmt_pending_cmd *cmd = data;
8919 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8920 struct mgmt_rp_add_advertising rp;
8921
8922 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8923
8924 memset(&rp, 0, sizeof(rp));
8925
8926 rp.instance = cp->instance;
8927
8928 if (err)
8929 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
8930 mgmt_status(err));
8931 else
8932 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
8933 mgmt_status(err), &rp, sizeof(rp));
8934
8935 mgmt_pending_free(cmd);
8936 }
8937
8938 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8939 {
8940 struct mgmt_pending_cmd *cmd = data;
8941 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8942 int err;
8943
8944 if (ext_adv_capable(hdev)) {
8945 err = hci_update_adv_data_sync(hdev, cp->instance);
8946 if (err)
8947 return err;
8948
8949 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8950 if (err)
8951 return err;
8952
8953 return hci_enable_ext_advertising_sync(hdev, cp->instance);
8954 }
8955
8956 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8957 }
8958
8959 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
8960 u16 data_len)
8961 {
8962 struct mgmt_cp_add_ext_adv_data *cp = data;
8963 struct mgmt_rp_add_ext_adv_data rp;
8964 u8 schedule_instance = 0;
8965 struct adv_info *next_instance;
8966 struct adv_info *adv_instance;
8967 int err = 0;
8968 struct mgmt_pending_cmd *cmd;
8969
8970 BT_DBG("%s", hdev->name);
8971
8972 hci_dev_lock(hdev);
8973
8974 adv_instance = hci_find_adv_instance(hdev, cp->instance);
8975
8976 if (!adv_instance) {
8977 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8978 MGMT_STATUS_INVALID_PARAMS);
8979 goto unlock;
8980 }
8981
8982 /* In the new interface, we require the adapter to be powered to register */
8983 if (!hdev_is_powered(hdev)) {
8984 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8985 MGMT_STATUS_REJECTED);
8986 goto clear_new_instance;
8987 }
8988
8989 if (adv_busy(hdev)) {
8990 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8991 MGMT_STATUS_BUSY);
8992 goto clear_new_instance;
8993 }
8994
8995 /* Validate new data */
8996 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
8997 cp->adv_data_len, true) ||
8998 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
8999 cp->adv_data_len, cp->scan_rsp_len, false)) {
9000 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9001 MGMT_STATUS_INVALID_PARAMS);
9002 goto clear_new_instance;
9003 }
9004
9005 /* Set the data in the advertising instance */
9006 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
9007 cp->data, cp->scan_rsp_len,
9008 cp->data + cp->adv_data_len);
9009
9010 /* If using software rotation, determine next instance to use */
9011 if (hdev->cur_adv_instance == cp->instance) {
9012 /* If the currently advertised instance is being changed
9013 * then cancel the current advertising and schedule the
9014 * next instance. If there is only one instance then the
9015 * overridden advertising data will be visible right
9016 * away.
9017 */
9018 cancel_adv_timeout(hdev);
9019
9020 next_instance = hci_get_next_instance(hdev, cp->instance);
9021 if (next_instance)
9022 schedule_instance = next_instance->instance;
9023 } else if (!hdev->adv_instance_timeout) {
9024 /* Immediately advertise the new instance if no other
9025 * instance is currently being advertised.
9026 */
9027 schedule_instance = cp->instance;
9028 }
9029
9030 /* If the HCI_ADVERTISING flag is set or there is no instance to
9031 * be advertised then we have no HCI communication to make.
9032 * Simply return.
9033 */
9034 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
9035 if (adv_instance->pending) {
9036 mgmt_advertising_added(sk, hdev, cp->instance);
9037 adv_instance->pending = false;
9038 }
9039 rp.instance = cp->instance;
9040 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9041 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9042 goto unlock;
9043 }
9044
9045 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9046 data_len);
9047 if (!cmd) {
9048 err = -ENOMEM;
9049 goto clear_new_instance;
9050 }
9051
9052 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
9053 add_ext_adv_data_complete);
9054 if (err < 0) {
9055 mgmt_pending_free(cmd);
9056 goto clear_new_instance;
9057 }
9058
9059 /* We were successful in updating the data, so trigger the
9060 * advertising_added event if this is an instance that wasn't
9061 * previously advertising. If a failure occurs in the requests we
9062 * initiated, we will remove the instance again in add_advertising_complete.
9063 */
9064 if (adv_instance->pending)
9065 mgmt_advertising_added(sk, hdev, cp->instance);
9066
9067 goto unlock;
9068
9069 clear_new_instance:
9070 hci_remove_adv_instance(hdev, cp->instance);
9071
9072 unlock:
9073 hci_dev_unlock(hdev);
9074
9075 return err;
9076 }
9077
9078 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9079 int err)
9080 {
9081 struct mgmt_pending_cmd *cmd = data;
9082 struct mgmt_cp_remove_advertising *cp = cmd->param;
9083 struct mgmt_rp_remove_advertising rp;
9084
9085 bt_dev_dbg(hdev, "err %d", err);
9086
9087 memset(&rp, 0, sizeof(rp));
9088 rp.instance = cp->instance;
9089
9090 if (err)
9091 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
9092 mgmt_status(err));
9093 else
9094 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
9095 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9096
9097 mgmt_pending_free(cmd);
9098 }
9099
9100 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9101 {
9102 struct mgmt_pending_cmd *cmd = data;
9103 struct mgmt_cp_remove_advertising *cp = cmd->param;
9104 int err;
9105
9106 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9107 if (err)
9108 return err;
9109
9110 if (list_empty(&hdev->adv_instances))
9111 err = hci_disable_advertising_sync(hdev);
9112
9113 return err;
9114 }
9115
9116 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9117 void *data, u16 data_len)
9118 {
9119 struct mgmt_cp_remove_advertising *cp = data;
9120 struct mgmt_pending_cmd *cmd;
9121 int err;
9122
9123 bt_dev_dbg(hdev, "sock %p", sk);
9124
9125 hci_dev_lock(hdev);
9126
9127 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9128 err = mgmt_cmd_status(sk, hdev->id,
9129 MGMT_OP_REMOVE_ADVERTISING,
9130 MGMT_STATUS_INVALID_PARAMS);
9131 goto unlock;
9132 }
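/* Instance 0 acts as a wildcard that removes all instances, which is
 * why only non-zero instances were checked against the list above.
 */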
9133
9134 if (pending_find(MGMT_OP_SET_LE, hdev)) {
9135 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9136 MGMT_STATUS_BUSY);
9137 goto unlock;
9138 }
9139
9140 if (list_empty(&hdev->adv_instances)) {
9141 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9142 MGMT_STATUS_INVALID_PARAMS);
9143 goto unlock;
9144 }
9145
9146 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9147 data_len);
9148 if (!cmd) {
9149 err = -ENOMEM;
9150 goto unlock;
9151 }
9152
9153 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
9154 remove_advertising_complete);
9155 if (err < 0)
9156 mgmt_pending_free(cmd);
9157
9158 unlock:
9159 hci_dev_unlock(hdev);
9160
9161 return err;
9162 }
9163
9164 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9165 void *data, u16 data_len)
9166 {
9167 struct mgmt_cp_get_adv_size_info *cp = data;
9168 struct mgmt_rp_get_adv_size_info rp;
9169 u32 flags, supported_flags;
9170
9171 bt_dev_dbg(hdev, "sock %p", sk);
9172
9173 if (!lmp_le_capable(hdev))
9174 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9175 MGMT_STATUS_REJECTED);
9176
9177 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9178 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9179 MGMT_STATUS_INVALID_PARAMS);
9180
9181 flags = __le32_to_cpu(cp->flags);
9182
9183 /* The current implementation only supports a subset of the specified
9184 * flags.
9185 */
9186 supported_flags = get_supported_adv_flags(hdev);
9187 if (flags & ~supported_flags)
9188 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9189 MGMT_STATUS_INVALID_PARAMS);
9190
9191 rp.instance = cp->instance;
9192 rp.flags = cp->flags;
9193 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9194 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9195
9196 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9197 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9198 }
9199
9200 static const struct hci_mgmt_handler mgmt_handlers[] = {
9201 { NULL }, /* 0x0000 (no command) */
9202 { read_version, MGMT_READ_VERSION_SIZE,
9203 HCI_MGMT_NO_HDEV |
9204 HCI_MGMT_UNTRUSTED },
9205 { read_commands, MGMT_READ_COMMANDS_SIZE,
9206 HCI_MGMT_NO_HDEV |
9207 HCI_MGMT_UNTRUSTED },
9208 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
9209 HCI_MGMT_NO_HDEV |
9210 HCI_MGMT_UNTRUSTED },
9211 { read_controller_info, MGMT_READ_INFO_SIZE,
9212 HCI_MGMT_UNTRUSTED },
9213 { set_powered, MGMT_SETTING_SIZE },
9214 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
9215 { set_connectable, MGMT_SETTING_SIZE },
9216 { set_fast_connectable, MGMT_SETTING_SIZE },
9217 { set_bondable, MGMT_SETTING_SIZE },
9218 { set_link_security, MGMT_SETTING_SIZE },
9219 { set_ssp, MGMT_SETTING_SIZE },
9220 { set_hs, MGMT_SETTING_SIZE },
9221 { set_le, MGMT_SETTING_SIZE },
9222 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
9223 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
9224 { add_uuid, MGMT_ADD_UUID_SIZE },
9225 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
9226 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
9227 HCI_MGMT_VAR_LEN },
9228 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
9229 HCI_MGMT_VAR_LEN },
9230 { disconnect, MGMT_DISCONNECT_SIZE },
9231 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
9232 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
9233 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
9234 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
9235 { pair_device, MGMT_PAIR_DEVICE_SIZE },
9236 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
9237 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
9238 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
9239 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
9240 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
9241 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
9242 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
9243 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
9244 HCI_MGMT_VAR_LEN },
9245 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
9246 { start_discovery, MGMT_START_DISCOVERY_SIZE },
9247 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
9248 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
9249 { block_device, MGMT_BLOCK_DEVICE_SIZE },
9250 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
9251 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
9252 { set_advertising, MGMT_SETTING_SIZE },
9253 { set_bredr, MGMT_SETTING_SIZE },
9254 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
9255 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
9256 { set_secure_conn, MGMT_SETTING_SIZE },
9257 { set_debug_keys, MGMT_SETTING_SIZE },
9258 { set_privacy, MGMT_SET_PRIVACY_SIZE },
9259 { load_irks, MGMT_LOAD_IRKS_SIZE,
9260 HCI_MGMT_VAR_LEN },
9261 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
9262 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
9263 { add_device, MGMT_ADD_DEVICE_SIZE },
9264 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
9265 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
9266 HCI_MGMT_VAR_LEN },
9267 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
9268 HCI_MGMT_NO_HDEV |
9269 HCI_MGMT_UNTRUSTED },
9270 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
9271 HCI_MGMT_UNCONFIGURED |
9272 HCI_MGMT_UNTRUSTED },
9273 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
9274 HCI_MGMT_UNCONFIGURED },
9275 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
9276 HCI_MGMT_UNCONFIGURED },
9277 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
9278 HCI_MGMT_VAR_LEN },
9279 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
9280 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
9281 HCI_MGMT_NO_HDEV |
9282 HCI_MGMT_UNTRUSTED },
9283 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
9284 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
9285 HCI_MGMT_VAR_LEN },
9286 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
9287 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
9288 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
9289 { read_ext_controller_info, MGMT_READ_EXT_INFO_SIZE,
9290 HCI_MGMT_UNTRUSTED },
9291 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
9292 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
9293 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
9294 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
9295 HCI_MGMT_VAR_LEN },
9296 { set_wideband_speech, MGMT_SETTING_SIZE },
9297 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
9298 HCI_MGMT_UNTRUSTED },
9299 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
9300 HCI_MGMT_UNTRUSTED |
9301 HCI_MGMT_HDEV_OPTIONAL },
9302 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
9303 HCI_MGMT_VAR_LEN |
9304 HCI_MGMT_HDEV_OPTIONAL },
9305 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
9306 HCI_MGMT_UNTRUSTED },
9307 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
9308 HCI_MGMT_VAR_LEN },
9309 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
9310 HCI_MGMT_UNTRUSTED },
9311 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
9312 HCI_MGMT_VAR_LEN },
9313 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
9314 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
9315 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
9316 { add_adv_patterns_monitor, MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
9317 HCI_MGMT_VAR_LEN },
9318 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
9319 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
9320 HCI_MGMT_VAR_LEN },
9321 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
9322 HCI_MGMT_VAR_LEN },
9323 { add_adv_patterns_monitor_rssi,
9324 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
9325 HCI_MGMT_VAR_LEN },
9326 { set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
9327 HCI_MGMT_VAR_LEN },
9328 { mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
9329 { mesh_send, MGMT_MESH_SEND_SIZE,
9330 HCI_MGMT_VAR_LEN },
9331 { mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
9332 { mgmt_hci_cmd_sync, MGMT_HCI_CMD_SYNC_SIZE, HCI_MGMT_VAR_LEN },
9333 };
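/* Each entry above is indexed by its mgmt opcode and carries the
 * handler, the expected parameter size (a minimum when HCI_MGMT_VAR_LEN
 * is set) and optional dispatch flags such as HCI_MGMT_NO_HDEV or
 * HCI_MGMT_UNTRUSTED.
 */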
9334
9335 void mgmt_index_added(struct hci_dev *hdev)
9336 {
9337 struct mgmt_ev_ext_index ev;
9338
9339 if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
9340 return;
9341
9342 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9343 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
9344 HCI_MGMT_UNCONF_INDEX_EVENTS);
9345 ev.type = 0x01;
9346 } else {
9347 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9348 HCI_MGMT_INDEX_EVENTS);
9349 ev.type = 0x00;
9350 }
9351
9352 ev.bus = hdev->bus;
9353
9354 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9355 HCI_MGMT_EXT_INDEX_EVENTS);
9356 }
9357
9358 void mgmt_index_removed(struct hci_dev *hdev)
9359 {
9360 struct mgmt_ev_ext_index ev;
9361 struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };
9362
9363 if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
9364 return;
9365
9366 mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);
9367
9368 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9369 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
9370 HCI_MGMT_UNCONF_INDEX_EVENTS);
9371 ev.type = 0x01;
9372 } else {
9373 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9374 HCI_MGMT_INDEX_EVENTS);
9375 ev.type = 0x00;
9376 }
9377
9378 ev.bus = hdev->bus;
9379
9380 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9381 HCI_MGMT_EXT_INDEX_EVENTS);
9382
9383 /* Cancel any remaining timed work */
9384 if (!hci_dev_test_flag(hdev, HCI_MGMT))
9385 return;
9386 cancel_delayed_work_sync(&hdev->discov_off);
9387 cancel_delayed_work_sync(&hdev->service_cache);
9388 cancel_delayed_work_sync(&hdev->rpa_expired);
9389 }
9390
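/* Called when a power-on attempt completes. On success the stored LE
 * actions are restored and passive scanning is re-evaluated before the
 * pending MGMT_OP_SET_POWERED commands are answered and New Settings is
 * emitted.
 */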
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}

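/* Power-off path: pending commands are failed with INVALID_INDEX when the
 * controller is being unregistered and with NOT_POWERED otherwise, and a
 * zeroed Class of Device is advertised if one was previously set.
 */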
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
			     &match);

	/* If the power off is because of hdev unregistration, use the
	 * appropriate INVALID_INDEX status. Otherwise use NOT_POWERED.
	 * We cover both scenarios here since later in mgmt_index_removed()
	 * any hci_conn callbacks will have already been triggered,
	 * potentially causing misleading DISCONNECTED status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
	else
		match.mgmt_status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct mgmt_pending_cmd *cmd;
	u8 status;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

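/* Map an SMP LTK to the key type exposed over mgmt:
 *
 *   SMP_LTK / SMP_LTK_RESPONDER -> MGMT_LTK_(UN)AUTHENTICATED (legacy pairing)
 *   SMP_LTK_P256                -> MGMT_LTK_P256_(UN)AUTH     (LE Secure Conn)
 *   SMP_LTK_P256_DEBUG          -> MGMT_LTK_P256_DEBUG
 */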
static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_RESPONDER:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}

	return MGMT_LTK_UNAUTHENTICATED;
}

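/* Note on the store hint below: a random address whose two most significant
 * bits are 0b11 is a static random address; anything else (a resolvable or
 * non-resolvable private address) is not a stable identity, so userspace is
 * told not to persist keys for it.
 */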
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't need their
	 * long term keys stored. Their addresses will change the next
	 * time around.
	 *
	 * Only when a remote device provides an identity address should
	 * the long term key be stored. If the remote identity is known,
	 * the long term keys are internally mapped to the identity
	 * address, so static random and public addresses are allowed
	 * here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't need their
	 * signature resolving keys stored. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address should
	 * the signature resolving key be stored, so static random and
	 * public addresses are allowed here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

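/* Emit MGMT_EV_DEVICE_CONNECTED exactly once per connection, guarded by the
 * HCI_CONN_MGMT_CONNECTED flag. The event carries either the cached LE
 * advertising data or, for BR/EDR, the remote name and Class of Device as
 * EIR-formatted fields.
 */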
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* Allocate a buffer sized for either the LE advertising data or
	 * the BR/EDR name and class of device fields.
	 */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) +
				     (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE,
						    name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class,
						    sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}

static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
}

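/* Returns true if the controller is on its way down: either the
 * HCI_POWERING_DOWN flag is already set or a Set Powered (off) command is
 * still pending.
 */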
bool mgmt_powering_down(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
		return true;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, true,
			     unpair_device_rsp, hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
					 conn->dst_type, status, true);
		return;
	}

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}

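/* The reply-complete helpers below are thin wrappers that resolve the
 * matching pending user confirm/passkey command through
 * user_pairing_resp_complete() with the corresponding opcode.
 */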
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, false, sk_lookup,
			     &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, false, sk_lookup,
			     &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, false, sk_lookup,
			     &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS,
				   NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is an HCI command issued as part of powering
		 * the HCI dev on or off, don't send any mgmt signals.
		 */
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
			return;

		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}

static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}

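/* Walk the EIR/AD structures (each a length octet followed by a type octet
 * and payload) and test whether any advertised UUID is in the filter list.
 * 16-bit and 32-bit UUIDs are expanded to full 128-bit form by patching
 * them into the Bluetooth base UUID before comparing; e.g. the 16-bit
 * UUID 0x180d becomes 0000180d-0000-1000-8000-00805f9b34fb.
 */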
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}

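/* Apply the Start Service Discovery filters (RSSI threshold and UUID list)
 * to a single result; returns false when the result should be dropped.
 */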
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If an RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * an RSSI smaller than the RSSI threshold will be dropped. If the
	 * quirk is set, let it through for further processing, as we might
	 * need to restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER)) {
		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}

void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.monitor_handle = cpu_to_le16(handle);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}

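/* Clone a DEVICE_FOUND event into an ADV_MONITOR_DEVICE_FOUND event by
 * prepending the handle of the monitor that matched the report.
 */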
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}

static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}

static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}

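/* Entry point for inquiry results and advertising reports. The report is
 * optionally mirrored to the mesh handler, filtered against the discovery
 * state (RSSI/UUID filters, limited discoverable bit) and finally handed
 * to mgmt_adv_monitor_device_found(), which decides between DEVICE_FOUND
 * and ADV_MONITOR_DEVICE_FOUND events.
 */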
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for discovery that was not initiated by the
	 * kernel. For LE, one exception is if we have pend_le_reports > 0,
	 * in which case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);

			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}

void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) +
			     (name ? eir_precalc_len(name_len) : 0));
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name,
					    name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}

void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	} else {
		memset(&ev.addr, 0, sizeof(ev.addr));
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}

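/* Registration glue for the HCI Control Channel: the mgmt_handlers table
 * above is exposed to hci_sock through this channel descriptor. As a rough
 * sketch of the userspace side (not part of this file), a mgmt client binds
 * a raw HCI socket to the control channel:
 *
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = HCI_DEV_NONE,
 *		.hci_channel = HCI_CHANNEL_CONTROL,
 *	};
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * after which the commands in mgmt_handlers[] can be sent and the events
 * emitted above are received.
 */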
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};

int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}

void mgmt_cleanup(struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, true);
		} while (mesh_tx);
	}

	read_unlock(&hci_dev_list_lock);
}