/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <linux/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
#include "aosp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	23
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
	MGMT_OP_HCI_CMD_SYNC,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};

static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};

#define CACHE_TIMEOUT	secs_to_jiffies(2)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

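/* Map a negative errno to the closest MGMT status code. Anything
 * unrecognized falls back to MGMT_STATUS_FAILED.
 */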
static u8 mgmt_errno_status(int err)
{
	switch (err) {
	case 0:
		return MGMT_STATUS_SUCCESS;
	case -EPERM:
		return MGMT_STATUS_REJECTED;
	case -EINVAL:
		return MGMT_STATUS_INVALID_PARAMS;
	case -EOPNOTSUPP:
		return MGMT_STATUS_NOT_SUPPORTED;
	case -EBUSY:
		return MGMT_STATUS_BUSY;
	case -ETIMEDOUT:
		return MGMT_STATUS_AUTH_FAILED;
	case -ENOMEM:
		return MGMT_STATUS_NO_RESOURCES;
	case -EISCONN:
		return MGMT_STATUS_ALREADY_CONNECTED;
	case -ENOTCONN:
		return MGMT_STATUS_DISCONNECTED;
	}

	return MGMT_STATUS_FAILED;
}

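/* Convert an error into a MGMT status: negative values are treated as
 * errnos, non-negative values as HCI status codes looked up in the
 * conversion table above.
 */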
static u8 mgmt_status(int err)
{
	if (err < 0)
		return mgmt_errno_status(err);

	if (err < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[err];

	return MGMT_STATUS_FAILED;
}

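/* Helpers for emitting events on the HCI control channel. The _index
 * and _limited variants let the caller pick which socket flag gates
 * delivery, while mgmt_event() and mgmt_event_skb() always go to
 * trusted sockets only.
 */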
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}

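/* Translate a BDADDR_LE_* address type from the MGMT API into the
 * ADDR_LE_DEV_* values used internally by the core.
 */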
static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;
	else
		return ADDR_LE_DEV_RANDOM;
}

void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}

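/* Untrusted sockets are only told about the read-only subset of
 * commands and events defined in the tables above.
 */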
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}

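/* The index list replies are built in two passes while holding
 * hci_dev_list_lock: the first pass sizes the reply, the second fills
 * it in, which is also why the allocations below use GFP_ATOMIC.
 */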
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}

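/* A controller is considered configured once any required external
 * configuration has completed and, where the quirks demand it, a valid
 * public address has been set.
 */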
static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}

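/* Derive the bitmask of supported PHYs from the BR/EDR LMP features and
 * the LE feature bits.
 */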
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}

static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}

static u32 get_configurable_phys(struct hci_dev *hdev)
{
	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}

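/* Settings the controller could support, as opposed to the ones
 * currently in effect which get_current_settings() reports.
 */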
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev))
			settings |= MGMT_SETTING_SSP;

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (ll_privacy_capable(hdev))
		settings |= MGMT_SETTING_LL_PRIVACY;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then whether
	 * the address is actually in use decides if the flag is set.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	if (ll_privacy_capable(hdev))
		settings |= MGMT_SETTING_LL_PRIVACY;

	return settings;
}

static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}

bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}

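/* Once the service cache timeout expires, push the buffered EIR and
 * class of device updates out to the controller.
 */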
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}

static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}

static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}

static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data);

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);

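/* Tell user space that a mesh packet completed (unless silent) and
 * remove the pending transmit entry.
 */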
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}

static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	if (list_empty(&hdev->adv_instances))
		hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}

static int mesh_send_sync(struct hci_dev *hdev, void *data);
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);

static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}

static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}

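/* Called the first time a management socket interacts with this
 * controller: set up the delayed work handlers and mark the device as
 * mgmt controlled.
 */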
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set implicitly so that
	 * pairing works for them. For mgmt, however, we require user space
	 * to explicitly enable it.
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}

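/* Append class of device, appearance and the local names to an EIR
 * buffer and return the number of bytes used. Shared by the extended
 * controller info reply and the ext info changed event.
 */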
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}

static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}

static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}

void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for the AUTO_OFF case where the device might not
		 * "really" have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}

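/* Completion handler for MGMT_OP_SET_POWERED. On a successful power on
 * the LE auto-connect actions are restored before the settings response
 * and New Settings event are sent; errors are reported as a command
 * status instead.
 */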
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}

static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return -ECANCELED;

	cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}

static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!cp->val) {
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
					      MGMT_STATUS_BUSY);
			goto failed;
		}
	}

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, *status);
}

static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	/* dequeue cmd_sync entries using cmd as data as that is about to be
	 * removed/freed.
	 */
	hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);

	if (cmd->cmd_complete) {
		cmd->cmd_complete(cmd, match->mgmt_status);
		return;
	}

	cmd_status_rsp(cmd, data);
}

static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}

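/* Commands depending on BR/EDR or LE report NOT_SUPPORTED when the
 * controller lacks the feature entirely and REJECTED when it is present
 * but currently disabled.
 */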
static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = secs_to_jiffies(hdev->discov_timeout);

		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}

static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout,
		 * so there is no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. If only the timeout gets updated,
	 * there is no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = secs_to_jiffies(hdev->discov_timeout);

			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might still be
	 * active and store the new timeout value. The arming of the
	 * timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}

static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}

static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}

static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

1851 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1852 u16 len)
1853 {
1854 struct mgmt_mode *cp = data;
1855 struct mgmt_pending_cmd *cmd;
1856 u8 val, status;
1857 int err;
1858
1859 bt_dev_dbg(hdev, "sock %p", sk);
1860
1861 status = mgmt_bredr_support(hdev);
1862 if (status)
1863 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1864 status);
1865
1866 if (cp->val != 0x00 && cp->val != 0x01)
1867 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1868 MGMT_STATUS_INVALID_PARAMS);
1869
1870 hci_dev_lock(hdev);
1871
1872 if (!hdev_is_powered(hdev)) {
1873 bool changed = false;
1874
1875 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1876 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1877 changed = true;
1878 }
1879
1880 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1881 if (err < 0)
1882 goto failed;
1883
1884 if (changed)
1885 err = new_settings(hdev, sk);
1886
1887 goto failed;
1888 }
1889
1890 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1891 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1892 MGMT_STATUS_BUSY);
1893 goto failed;
1894 }
1895
1896 val = !!cp->val;
1897
1898 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1899 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1900 goto failed;
1901 }
1902
1903 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1904 if (!cmd) {
1905 err = -ENOMEM;
1906 goto failed;
1907 }
1908
1909 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1910 if (err < 0) {
1911 mgmt_pending_remove(cmd);
1912 goto failed;
1913 }
1914
1915 failed:
1916 hci_dev_unlock(hdev);
1917 return err;
1918 }
1919
1920 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1921 {
1922 struct cmd_lookup match = { NULL, hdev };
1923 struct mgmt_pending_cmd *cmd = data;
1924 struct mgmt_mode *cp = cmd->param;
1925 u8 enable = cp->val;
1926 bool changed;
1927
1928 /* Make sure cmd still outstanding. */
1929 if (err == -ECANCELED || cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1930 return;
1931
1932 if (err) {
1933 u8 mgmt_err = mgmt_status(err);
1934
1935 if (enable && hci_dev_test_and_clear_flag(hdev,
1936 HCI_SSP_ENABLED)) {
1937 new_settings(hdev, NULL);
1938 }
1939
1940 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, true,
1941 cmd_status_rsp, &mgmt_err);
1942 return;
1943 }
1944
1945 if (enable) {
1946 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1947 } else {
1948 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1949 }
1950
1951 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, true, settings_rsp, &match);
1952
1953 if (changed)
1954 new_settings(hdev, match.sk);
1955
1956 if (match.sk)
1957 sock_put(match.sk);
1958
1959 hci_update_eir_sync(hdev);
1960 }
1961
1962 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1963 {
1964 struct mgmt_pending_cmd *cmd = data;
1965 struct mgmt_mode *cp = cmd->param;
1966 bool changed = false;
1967 int err;
1968
1969 if (cp->val)
1970 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1971
1972 err = hci_write_ssp_mode_sync(hdev, cp->val);
1973
1974 if (!err && changed)
1975 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1976
1977 return err;
1978 }
1979
1980 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1981 {
1982 struct mgmt_mode *cp = data;
1983 struct mgmt_pending_cmd *cmd;
1984 u8 status;
1985 int err;
1986
1987 bt_dev_dbg(hdev, "sock %p", sk);
1988
1989 status = mgmt_bredr_support(hdev);
1990 if (status)
1991 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1992
1993 if (!lmp_ssp_capable(hdev))
1994 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1995 MGMT_STATUS_NOT_SUPPORTED);
1996
1997 if (cp->val != 0x00 && cp->val != 0x01)
1998 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1999 MGMT_STATUS_INVALID_PARAMS);
2000
2001 hci_dev_lock(hdev);
2002
2003 if (!hdev_is_powered(hdev)) {
2004 bool changed;
2005
2006 if (cp->val) {
2007 changed = !hci_dev_test_and_set_flag(hdev,
2008 HCI_SSP_ENABLED);
2009 } else {
2010 changed = hci_dev_test_and_clear_flag(hdev,
2011 HCI_SSP_ENABLED);
2012 }
2013
2014 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2015 if (err < 0)
2016 goto failed;
2017
2018 if (changed)
2019 err = new_settings(hdev, sk);
2020
2021 goto failed;
2022 }
2023
2024 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2025 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2026 MGMT_STATUS_BUSY);
2027 goto failed;
2028 }
2029
2030 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2031 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2032 goto failed;
2033 }
2034
2035 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2036 if (!cmd)
2037 err = -ENOMEM;
2038 else
2039 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2040 set_ssp_complete);
2041
2042 if (err < 0) {
2043 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2044 MGMT_STATUS_FAILED);
2045
2046 if (cmd)
2047 mgmt_pending_remove(cmd);
2048 }
2049
2050 failed:
2051 hci_dev_unlock(hdev);
2052 return err;
2053 }
2054
2055 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2056 {
2057 bt_dev_dbg(hdev, "sock %p", sk);
2058
2059 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2060 MGMT_STATUS_NOT_SUPPORTED);
2061 }
2062
2063 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2064 {
2065 struct cmd_lookup match = { NULL, hdev };
2066 u8 status = mgmt_status(err);
2067
2068 bt_dev_dbg(hdev, "err %d", err);
2069
2070 if (status) {
2071 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, true, cmd_status_rsp,
2072 &status);
2073 return;
2074 }
2075
2076 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, true, settings_rsp, &match);
2077
2078 new_settings(hdev, match.sk);
2079
2080 if (match.sk)
2081 sock_put(match.sk);
2082 }
2083
2084 static int set_le_sync(struct hci_dev *hdev, void *data)
2085 {
2086 struct mgmt_pending_cmd *cmd = data;
2087 struct mgmt_mode *cp = cmd->param;
2088 u8 val = !!cp->val;
2089 int err;
2090
2091 if (!val) {
2092 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2093
2094 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2095 hci_disable_advertising_sync(hdev);
2096
2097 if (ext_adv_capable(hdev))
2098 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2099 } else {
2100 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2101 }
2102
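	/* Mirror the requested LE setting into the controller's LE Host
	 * Supported setting (simultaneous LE and BR/EDR is always 0x00).
	 */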
2103 err = hci_write_le_host_supported_sync(hdev, val, 0);
2104
2105 /* Make sure the controller has a good default for
2106 * advertising data. Restrict the update to when LE
2107 * has actually been enabled. During power on, the
2108 * update in powered_update_hci will take care of it.
2109 */
2110 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2111 if (ext_adv_capable(hdev)) {
2112 int status;
2113
2114 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2115 if (!status)
2116 hci_update_scan_rsp_data_sync(hdev, 0x00);
2117 } else {
2118 hci_update_adv_data_sync(hdev, 0x00);
2119 hci_update_scan_rsp_data_sync(hdev, 0x00);
2120 }
2121
2122 hci_update_passive_scan(hdev);
2123 }
2124
2125 return err;
2126 }
2127
2128 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2129 {
2130 struct mgmt_pending_cmd *cmd = data;
2131 u8 status = mgmt_status(err);
2132 struct sock *sk = cmd->sk;
2133
2134 if (status) {
2135 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, true,
2136 cmd_status_rsp, &status);
2137 return;
2138 }
2139
2140 mgmt_pending_remove(cmd);
2141 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2142 }
2143
2144 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2145 {
2146 struct mgmt_pending_cmd *cmd = data;
2147 struct mgmt_cp_set_mesh *cp = cmd->param;
2148 size_t len = cmd->param_len;
2149
2150 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2151
2152 if (cp->enable)
2153 hci_dev_set_flag(hdev, HCI_MESH);
2154 else
2155 hci_dev_clear_flag(hdev, HCI_MESH);
2156
2157 hdev->le_scan_interval = __le16_to_cpu(cp->period);
2158 hdev->le_scan_window = __le16_to_cpu(cp->window);
2159
2160 len -= sizeof(*cp);
2161
2162 	/* Copy the AD type filters if they fit; otherwise leave the list
2163 	 * empty so that all advertising packets are forwarded.
	 */
	if (len <= sizeof(hdev->mesh_ad_types))
2164 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2165
2166 hci_update_passive_scan_sync(hdev);
2167 return 0;
2168 }
2169
2170 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2171 {
2172 struct mgmt_cp_set_mesh *cp = data;
2173 struct mgmt_pending_cmd *cmd;
2174 __u16 period, window;
2175 int err = 0;
2176
2177 bt_dev_dbg(hdev, "sock %p", sk);
2178
2179 if (!lmp_le_capable(hdev) ||
2180 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2181 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2182 MGMT_STATUS_NOT_SUPPORTED);
2183
2184 if (cp->enable != 0x00 && cp->enable != 0x01)
2185 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2186 MGMT_STATUS_INVALID_PARAMS);
2187
2188 /* Keep allowed ranges in sync with set_scan_params() */
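	/* Values are in 0.625 ms slots: 0x0004-0x4000 is 2.5 ms to 10.24 s */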
2189 period = __le16_to_cpu(cp->period);
2190
2191 if (period < 0x0004 || period > 0x4000)
2192 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2193 MGMT_STATUS_INVALID_PARAMS);
2194
2195 window = __le16_to_cpu(cp->window);
2196
2197 if (window < 0x0004 || window > 0x4000)
2198 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2199 MGMT_STATUS_INVALID_PARAMS);
2200
2201 if (window > period)
2202 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2203 MGMT_STATUS_INVALID_PARAMS);
2204
2205 hci_dev_lock(hdev);
2206
2207 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2208 if (!cmd)
2209 err = -ENOMEM;
2210 else
2211 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2212 set_mesh_complete);
2213
2214 if (err < 0) {
2215 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2216 MGMT_STATUS_FAILED);
2217
2218 if (cmd)
2219 mgmt_pending_remove(cmd);
2220 }
2221
2222 hci_dev_unlock(hdev);
2223 return err;
2224 }
2225
2226 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2227 {
2228 struct mgmt_mesh_tx *mesh_tx = data;
2229 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2230 unsigned long mesh_send_interval;
2231 u8 mgmt_err = mgmt_status(err);
2232
2233 /* Report any errors here, but don't report completion */
2234
2235 if (mgmt_err) {
2236 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2237 /* Send Complete Error Code for handle */
2238 mesh_send_complete(hdev, mesh_tx, false);
2239 return;
2240 }
2241
2242 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2243 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2244 mesh_send_interval);
2245 }
2246
2247 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2248 {
2249 struct mgmt_mesh_tx *mesh_tx = data;
2250 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2251 struct adv_info *adv, *next_instance;
2252 u8 instance = hdev->le_num_of_adv_sets + 1;
2253 u16 timeout, duration;
2254 int err = 0;
2255
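	/* The mesh TX instance lives one past the controller's advertising
	 * sets so it does not collide with user-configured instances; if
	 * all sets are already in use, report busy.
	 */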
2256 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2257 return MGMT_STATUS_BUSY;
2258
2259 timeout = 1000;
2260 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2261 adv = hci_add_adv_instance(hdev, instance, 0,
2262 send->adv_data_len, send->adv_data,
2263 0, NULL,
2264 timeout, duration,
2265 HCI_ADV_TX_POWER_NO_PREFERENCE,
2266 hdev->le_adv_min_interval,
2267 hdev->le_adv_max_interval,
2268 mesh_tx->handle);
2269
2270 if (!IS_ERR(adv))
2271 mesh_tx->instance = instance;
2272 else
2273 err = PTR_ERR(adv);
2274
2275 if (hdev->cur_adv_instance == instance) {
2276 /* If the currently advertised instance is being changed then
2277 * cancel the current advertising and schedule the next
2278 * instance. If there is only one instance then the overridden
2279 * advertising data will be visible right away.
2280 */
2281 cancel_adv_timeout(hdev);
2282
2283 next_instance = hci_get_next_instance(hdev, instance);
2284 if (next_instance)
2285 instance = next_instance->instance;
2286 else
2287 instance = 0;
2288 } else if (hdev->adv_instance_timeout) {
2289 		/* Immediately advertise the new instance if none is active,
2290 		 * or let the queue schedule it if advertising is in progress
2291 		 */
2292 instance = 0;
2293 }
2294
2295 if (instance)
2296 return hci_schedule_adv_instance_sync(hdev, instance, true);
2297
2298 return err;
2299 }
2300
2301 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2302 {
2303 struct mgmt_rp_mesh_read_features *rp = data;
2304
2305 if (rp->used_handles >= rp->max_handles)
2306 return;
2307
2308 rp->handles[rp->used_handles++] = mesh_tx->handle;
2309 }
2310
2311 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2312 void *data, u16 len)
2313 {
2314 struct mgmt_rp_mesh_read_features rp;
2315
2316 if (!lmp_le_capable(hdev) ||
2317 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2318 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2319 MGMT_STATUS_NOT_SUPPORTED);
2320
2321 memset(&rp, 0, sizeof(rp));
2322 rp.index = cpu_to_le16(hdev->id);
2323 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2324 rp.max_handles = MESH_HANDLES_MAX;
2325
2326 hci_dev_lock(hdev);
2327
2328 if (rp.max_handles)
2329 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2330
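	/* Trim the reply so that only the handles in use are returned */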
2331 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2332 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2333
2334 hci_dev_unlock(hdev);
2335 return 0;
2336 }
2337
2338 static int send_cancel(struct hci_dev *hdev, void *data)
2339 {
2340 struct mgmt_pending_cmd *cmd = data;
2341 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2342 struct mgmt_mesh_tx *mesh_tx;
2343
2344 if (!cancel->handle) {
2345 do {
2346 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2347
2348 if (mesh_tx)
2349 mesh_send_complete(hdev, mesh_tx, false);
2350 } while (mesh_tx);
2351 } else {
2352 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2353
2354 if (mesh_tx && mesh_tx->sk == cmd->sk)
2355 mesh_send_complete(hdev, mesh_tx, false);
2356 }
2357
2358 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2359 0, NULL, 0);
2360 mgmt_pending_free(cmd);
2361
2362 return 0;
2363 }
2364
2365 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2366 void *data, u16 len)
2367 {
2368 struct mgmt_pending_cmd *cmd;
2369 int err;
2370
2371 if (!lmp_le_capable(hdev) ||
2372 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2373 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2374 MGMT_STATUS_NOT_SUPPORTED);
2375
2376 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2377 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2378 MGMT_STATUS_REJECTED);
2379
2380 hci_dev_lock(hdev);
2381 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2382 if (!cmd)
2383 err = -ENOMEM;
2384 else
2385 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2386
2387 if (err < 0) {
2388 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2389 MGMT_STATUS_FAILED);
2390
2391 if (cmd)
2392 mgmt_pending_free(cmd);
2393 }
2394
2395 hci_dev_unlock(hdev);
2396 return err;
2397 }
2398
2399 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2400 {
2401 struct mgmt_mesh_tx *mesh_tx;
2402 struct mgmt_cp_mesh_send *send = data;
2403 struct mgmt_rp_mesh_read_features rp;
2404 bool sending;
2405 int err = 0;
2406
2407 if (!lmp_le_capable(hdev) ||
2408 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2409 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2410 MGMT_STATUS_NOT_SUPPORTED);
2411 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2412 len <= MGMT_MESH_SEND_SIZE ||
2413 len > (MGMT_MESH_SEND_SIZE + 31))
2414 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2415 MGMT_STATUS_REJECTED);
2416
2417 hci_dev_lock(hdev);
2418
2419 memset(&rp, 0, sizeof(rp));
2420 rp.max_handles = MESH_HANDLES_MAX;
2421
2422 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2423
2424 if (rp.max_handles <= rp.used_handles) {
2425 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2426 MGMT_STATUS_BUSY);
2427 goto done;
2428 }
2429
2430 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2431 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2432
2433 if (!mesh_tx)
2434 err = -ENOMEM;
2435 else if (!sending)
2436 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2437 mesh_send_start_complete);
2438
2439 if (err < 0) {
2440 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2441 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2442 MGMT_STATUS_FAILED);
2443
2444 if (mesh_tx) {
2445 if (sending)
2446 mgmt_mesh_remove(mesh_tx);
2447 }
2448 } else {
2449 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2450
2451 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2452 &mesh_tx->handle, 1);
2453 }
2454
2455 done:
2456 hci_dev_unlock(hdev);
2457 return err;
2458 }
2459
2460 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2461 {
2462 struct mgmt_mode *cp = data;
2463 struct mgmt_pending_cmd *cmd;
2464 int err;
2465 u8 val, enabled;
2466
2467 bt_dev_dbg(hdev, "sock %p", sk);
2468
2469 if (!lmp_le_capable(hdev))
2470 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2471 MGMT_STATUS_NOT_SUPPORTED);
2472
2473 if (cp->val != 0x00 && cp->val != 0x01)
2474 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2475 MGMT_STATUS_INVALID_PARAMS);
2476
2477 	/* Bluetooth single-mode LE-only controllers, and dual-mode
2478 	 * controllers configured as LE-only devices, do not allow
2479 	 * switching LE off. These either have LE enabled explicitly
2480 	 * or have had BR/EDR switched off previously.
2481 	 *
2482 	 * Trying to enable LE when it is already enabled gracefully
2483 	 * results in a positive response, while trying to disable it
2484 	 * results in rejection.
2485 	 */
2486 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2487 if (cp->val == 0x01)
2488 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2489
2490 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2491 MGMT_STATUS_REJECTED);
2492 }
2493
2494 hci_dev_lock(hdev);
2495
2496 val = !!cp->val;
2497 enabled = lmp_host_le_capable(hdev);
2498
2499 if (!hdev_is_powered(hdev) || val == enabled) {
2500 bool changed = false;
2501
2502 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2503 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2504 changed = true;
2505 }
2506
2507 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2508 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2509 changed = true;
2510 }
2511
2512 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2513 if (err < 0)
2514 goto unlock;
2515
2516 if (changed)
2517 err = new_settings(hdev, sk);
2518
2519 goto unlock;
2520 }
2521
2522 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2523 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2524 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2525 MGMT_STATUS_BUSY);
2526 goto unlock;
2527 }
2528
2529 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2530 if (!cmd)
2531 err = -ENOMEM;
2532 else
2533 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2534 set_le_complete);
2535
2536 if (err < 0) {
2537 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2538 MGMT_STATUS_FAILED);
2539
2540 if (cmd)
2541 mgmt_pending_remove(cmd);
2542 }
2543
2544 unlock:
2545 hci_dev_unlock(hdev);
2546 return err;
2547 }
2548
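/* Worker for MGMT_OP_HCI_CMD_SYNC: send the user-supplied HCI command,
 * wait for the requested event, using the supplied timeout or
 * HCI_CMD_TIMEOUT by default, and return the resulting payload.
 */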
2549 static int send_hci_cmd_sync(struct hci_dev *hdev, void *data)
2550 {
2551 struct mgmt_pending_cmd *cmd = data;
2552 struct mgmt_cp_hci_cmd_sync *cp = cmd->param;
2553 struct sk_buff *skb;
2554
2555 skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cp->opcode),
2556 le16_to_cpu(cp->params_len), cp->params,
2557 cp->event, cp->timeout ?
2558 secs_to_jiffies(cp->timeout) :
2559 HCI_CMD_TIMEOUT);
2560 if (IS_ERR(skb)) {
2561 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2562 mgmt_status(PTR_ERR(skb)));
2563 goto done;
2564 }
2565
2566 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC, 0,
2567 skb->data, skb->len);
2568
2569 kfree_skb(skb);
2570
2571 done:
2572 mgmt_pending_free(cmd);
2573
2574 return 0;
2575 }
2576
2577 static int mgmt_hci_cmd_sync(struct sock *sk, struct hci_dev *hdev,
2578 void *data, u16 len)
2579 {
2580 struct mgmt_cp_hci_cmd_sync *cp = data;
2581 struct mgmt_pending_cmd *cmd;
2582 int err;
2583
2584 if (len != (offsetof(struct mgmt_cp_hci_cmd_sync, params) +
2585 le16_to_cpu(cp->params_len)))
2586 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2587 MGMT_STATUS_INVALID_PARAMS);
2588
2589 hci_dev_lock(hdev);
2590 cmd = mgmt_pending_new(sk, MGMT_OP_HCI_CMD_SYNC, hdev, data, len);
2591 if (!cmd)
2592 err = -ENOMEM;
2593 else
2594 err = hci_cmd_sync_queue(hdev, send_hci_cmd_sync, cmd, NULL);
2595
2596 if (err < 0) {
2597 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2598 MGMT_STATUS_FAILED);
2599
2600 if (cmd)
2601 mgmt_pending_free(cmd);
2602 }
2603
2604 hci_dev_unlock(hdev);
2605 return err;
2606 }
2607
2608 /* This is a helper function to test for pending mgmt commands that can
2609 * cause CoD or EIR HCI commands. We can only allow one such pending
2610 * mgmt command at a time since otherwise we cannot easily track what
2611 * the current values are, will be, and based on that calculate if a new
2612 * HCI command needs to be sent and if yes with what value.
2613 */
2614 static bool pending_eir_or_class(struct hci_dev *hdev)
2615 {
2616 struct mgmt_pending_cmd *cmd;
2617
2618 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2619 switch (cmd->opcode) {
2620 case MGMT_OP_ADD_UUID:
2621 case MGMT_OP_REMOVE_UUID:
2622 case MGMT_OP_SET_DEV_CLASS:
2623 case MGMT_OP_SET_POWERED:
2624 return true;
2625 }
2626 }
2627
2628 return false;
2629 }
2630
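/* The Bluetooth Base UUID, 00000000-0000-1000-8000-00805F9B34FB, stored
 * in little-endian byte order. 16-bit and 32-bit UUIDs are shorthands
 * that only differ from the base in the four bytes at offset 12.
 */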
2631 static const u8 bluetooth_base_uuid[] = {
2632 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2633 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2634 };
2635
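/* Determine the over-the-air size of a UUID: anything not derived from
 * the Bluetooth Base UUID is a full 128-bit UUID, otherwise the value
 * at offset 12 decides between 16-bit and 32-bit.
 */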
2636 static u8 get_uuid_size(const u8 *uuid)
2637 {
2638 u32 val;
2639
2640 if (memcmp(uuid, bluetooth_base_uuid, 12))
2641 return 128;
2642
2643 val = get_unaligned_le32(&uuid[12]);
2644 if (val > 0xffff)
2645 return 32;
2646
2647 return 16;
2648 }
2649
2650 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2651 {
2652 struct mgmt_pending_cmd *cmd = data;
2653
2654 bt_dev_dbg(hdev, "err %d", err);
2655
2656 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
2657 mgmt_status(err), hdev->dev_class, 3);
2658
2659 mgmt_pending_free(cmd);
2660 }
2661
2662 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2663 {
2664 int err;
2665
2666 err = hci_update_class_sync(hdev);
2667 if (err)
2668 return err;
2669
2670 return hci_update_eir_sync(hdev);
2671 }
2672
2673 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2674 {
2675 struct mgmt_cp_add_uuid *cp = data;
2676 struct mgmt_pending_cmd *cmd;
2677 struct bt_uuid *uuid;
2678 int err;
2679
2680 bt_dev_dbg(hdev, "sock %p", sk);
2681
2682 hci_dev_lock(hdev);
2683
2684 if (pending_eir_or_class(hdev)) {
2685 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2686 MGMT_STATUS_BUSY);
2687 goto failed;
2688 }
2689
2690 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2691 if (!uuid) {
2692 err = -ENOMEM;
2693 goto failed;
2694 }
2695
2696 memcpy(uuid->uuid, cp->uuid, 16);
2697 uuid->svc_hint = cp->svc_hint;
2698 uuid->size = get_uuid_size(cp->uuid);
2699
2700 list_add_tail(&uuid->list, &hdev->uuids);
2701
2702 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2703 if (!cmd) {
2704 err = -ENOMEM;
2705 goto failed;
2706 }
2707
2708 	/* MGMT_OP_ADD_UUID doesn't require the adapter to be up/running,
2709 	 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2710 	 */
2711 err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
2712 mgmt_class_complete);
2713 if (err < 0) {
2714 mgmt_pending_free(cmd);
2715 goto failed;
2716 }
2717
2718 failed:
2719 hci_dev_unlock(hdev);
2720 return err;
2721 }
2722
2723 static bool enable_service_cache(struct hci_dev *hdev)
2724 {
2725 if (!hdev_is_powered(hdev))
2726 return false;
2727
2728 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2729 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2730 CACHE_TIMEOUT);
2731 return true;
2732 }
2733
2734 return false;
2735 }
2736
2737 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2738 {
2739 int err;
2740
2741 err = hci_update_class_sync(hdev);
2742 if (err)
2743 return err;
2744
2745 return hci_update_eir_sync(hdev);
2746 }
2747
2748 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2749 u16 len)
2750 {
2751 struct mgmt_cp_remove_uuid *cp = data;
2752 struct mgmt_pending_cmd *cmd;
2753 struct bt_uuid *match, *tmp;
2754 static const u8 bt_uuid_any[] = {
2755 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2756 };
2757 int err, found;
2758
2759 bt_dev_dbg(hdev, "sock %p", sk);
2760
2761 hci_dev_lock(hdev);
2762
2763 if (pending_eir_or_class(hdev)) {
2764 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2765 MGMT_STATUS_BUSY);
2766 goto unlock;
2767 }
2768
2769 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2770 hci_uuids_clear(hdev);
2771
2772 if (enable_service_cache(hdev)) {
2773 err = mgmt_cmd_complete(sk, hdev->id,
2774 MGMT_OP_REMOVE_UUID,
2775 0, hdev->dev_class, 3);
2776 goto unlock;
2777 }
2778
2779 goto update_class;
2780 }
2781
2782 found = 0;
2783
2784 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2785 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2786 continue;
2787
2788 list_del(&match->list);
2789 kfree(match);
2790 found++;
2791 }
2792
2793 if (found == 0) {
2794 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2795 MGMT_STATUS_INVALID_PARAMS);
2796 goto unlock;
2797 }
2798
2799 update_class:
2800 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2801 if (!cmd) {
2802 err = -ENOMEM;
2803 goto unlock;
2804 }
2805
2806 	/* MGMT_OP_REMOVE_UUID doesn't require the adapter to be up/running,
2807 	 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2808 	 */
2809 err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
2810 mgmt_class_complete);
2811 if (err < 0)
2812 mgmt_pending_free(cmd);
2813
2814 unlock:
2815 hci_dev_unlock(hdev);
2816 return err;
2817 }
2818
2819 static int set_class_sync(struct hci_dev *hdev, void *data)
2820 {
2821 int err = 0;
2822
2823 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2824 cancel_delayed_work_sync(&hdev->service_cache);
2825 err = hci_update_eir_sync(hdev);
2826 }
2827
2828 if (err)
2829 return err;
2830
2831 return hci_update_class_sync(hdev);
2832 }
2833
2834 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2835 u16 len)
2836 {
2837 struct mgmt_cp_set_dev_class *cp = data;
2838 struct mgmt_pending_cmd *cmd;
2839 int err;
2840
2841 bt_dev_dbg(hdev, "sock %p", sk);
2842
2843 if (!lmp_bredr_capable(hdev))
2844 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2845 MGMT_STATUS_NOT_SUPPORTED);
2846
2847 hci_dev_lock(hdev);
2848
2849 if (pending_eir_or_class(hdev)) {
2850 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2851 MGMT_STATUS_BUSY);
2852 goto unlock;
2853 }
2854
2855 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2856 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2857 MGMT_STATUS_INVALID_PARAMS);
2858 goto unlock;
2859 }
2860
2861 hdev->major_class = cp->major;
2862 hdev->minor_class = cp->minor;
2863
2864 if (!hdev_is_powered(hdev)) {
2865 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2866 hdev->dev_class, 3);
2867 goto unlock;
2868 }
2869
2870 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2871 if (!cmd) {
2872 err = -ENOMEM;
2873 goto unlock;
2874 }
2875
2876 	/* MGMT_OP_SET_DEV_CLASS doesn't require the adapter to be up/running,
2877 	 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2878 	 */
2879 err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
2880 mgmt_class_complete);
2881 if (err < 0)
2882 mgmt_pending_free(cmd);
2883
2884 unlock:
2885 hci_dev_unlock(hdev);
2886 return err;
2887 }
2888
2889 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2890 u16 len)
2891 {
2892 struct mgmt_cp_load_link_keys *cp = data;
2893 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2894 sizeof(struct mgmt_link_key_info));
2895 u16 key_count, expected_len;
2896 bool changed;
2897 int i;
2898
2899 bt_dev_dbg(hdev, "sock %p", sk);
2900
2901 if (!lmp_bredr_capable(hdev))
2902 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2903 MGMT_STATUS_NOT_SUPPORTED);
2904
2905 key_count = __le16_to_cpu(cp->key_count);
2906 if (key_count > max_key_count) {
2907 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2908 key_count);
2909 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2910 MGMT_STATUS_INVALID_PARAMS);
2911 }
2912
2913 expected_len = struct_size(cp, keys, key_count);
2914 if (expected_len != len) {
2915 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2916 expected_len, len);
2917 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2918 MGMT_STATUS_INVALID_PARAMS);
2919 }
2920
2921 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2922 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2923 MGMT_STATUS_INVALID_PARAMS);
2924
2925 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2926 key_count);
2927
2928 hci_dev_lock(hdev);
2929
2930 hci_link_keys_clear(hdev);
2931
2932 if (cp->debug_keys)
2933 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2934 else
2935 changed = hci_dev_test_and_clear_flag(hdev,
2936 HCI_KEEP_DEBUG_KEYS);
2937
2938 if (changed)
2939 new_settings(hdev, NULL);
2940
2941 for (i = 0; i < key_count; i++) {
2942 struct mgmt_link_key_info *key = &cp->keys[i];
2943
2944 if (hci_is_blocked_key(hdev,
2945 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2946 key->val)) {
2947 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2948 &key->addr.bdaddr);
2949 continue;
2950 }
2951
2952 if (key->addr.type != BDADDR_BREDR) {
2953 bt_dev_warn(hdev,
2954 "Invalid link address type %u for %pMR",
2955 key->addr.type, &key->addr.bdaddr);
2956 continue;
2957 }
2958
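		/* 0x08 (Authenticated Combination Key generated from P-256)
		 * is the highest link key type defined by the HCI spec.
		 */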
2959 if (key->type > 0x08) {
2960 bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
2961 key->type, &key->addr.bdaddr);
2962 continue;
2963 }
2964
2965 /* Always ignore debug keys and require a new pairing if
2966 * the user wants to use them.
2967 */
2968 if (key->type == HCI_LK_DEBUG_COMBINATION)
2969 continue;
2970
2971 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2972 key->type, key->pin_len, NULL);
2973 }
2974
2975 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2976
2977 hci_dev_unlock(hdev);
2978
2979 return 0;
2980 }
2981
2982 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2983 u8 addr_type, struct sock *skip_sk)
2984 {
2985 struct mgmt_ev_device_unpaired ev;
2986
2987 bacpy(&ev.addr.bdaddr, bdaddr);
2988 ev.addr.type = addr_type;
2989
2990 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2991 skip_sk);
2992 }
2993
2994 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2995 {
2996 struct mgmt_pending_cmd *cmd = data;
2997 struct mgmt_cp_unpair_device *cp = cmd->param;
2998
2999 if (!err)
3000 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3001
3002 cmd->cmd_complete(cmd, err);
3003 mgmt_pending_free(cmd);
3004 }
3005
3006 static int unpair_device_sync(struct hci_dev *hdev, void *data)
3007 {
3008 struct mgmt_pending_cmd *cmd = data;
3009 struct mgmt_cp_unpair_device *cp = cmd->param;
3010 struct hci_conn *conn;
3011
3012 if (cp->addr.type == BDADDR_BREDR)
3013 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3014 &cp->addr.bdaddr);
3015 else
3016 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3017 le_addr_type(cp->addr.type));
3018
3019 if (!conn)
3020 return 0;
3021
3022 /* Disregard any possible error since the likes of hci_abort_conn_sync
3023 * will clean up the connection no matter the error.
3024 */
3025 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3026
3027 return 0;
3028 }
3029
3030 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3031 u16 len)
3032 {
3033 struct mgmt_cp_unpair_device *cp = data;
3034 struct mgmt_rp_unpair_device rp;
3035 struct hci_conn_params *params;
3036 struct mgmt_pending_cmd *cmd;
3037 struct hci_conn *conn;
3038 u8 addr_type;
3039 int err;
3040
3041 memset(&rp, 0, sizeof(rp));
3042 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3043 rp.addr.type = cp->addr.type;
3044
3045 if (!bdaddr_type_is_valid(cp->addr.type))
3046 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3047 MGMT_STATUS_INVALID_PARAMS,
3048 &rp, sizeof(rp));
3049
3050 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
3051 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3052 MGMT_STATUS_INVALID_PARAMS,
3053 &rp, sizeof(rp));
3054
3055 hci_dev_lock(hdev);
3056
3057 if (!hdev_is_powered(hdev)) {
3058 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3059 MGMT_STATUS_NOT_POWERED, &rp,
3060 sizeof(rp));
3061 goto unlock;
3062 }
3063
3064 if (cp->addr.type == BDADDR_BREDR) {
3065 		/* If disconnection is requested, then look up the
3066 		 * connection. If the remote device is connected, the
3067 		 * connection will later be used to terminate the link.
3068 		 *
3069 		 * Leaving conn set to NULL means the link will not be
3070 		 * terminated.
3071 		 */
3072 if (cp->disconnect)
3073 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3074 &cp->addr.bdaddr);
3075 else
3076 conn = NULL;
3077
3078 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3079 if (err < 0) {
3080 err = mgmt_cmd_complete(sk, hdev->id,
3081 MGMT_OP_UNPAIR_DEVICE,
3082 MGMT_STATUS_NOT_PAIRED, &rp,
3083 sizeof(rp));
3084 goto unlock;
3085 }
3086
3087 goto done;
3088 }
3089
3090 /* LE address type */
3091 addr_type = le_addr_type(cp->addr.type);
3092
3093 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3094 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3095 if (err < 0) {
3096 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3097 MGMT_STATUS_NOT_PAIRED, &rp,
3098 sizeof(rp));
3099 goto unlock;
3100 }
3101
3102 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3103 if (!conn) {
3104 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3105 goto done;
3106 }
3107
3109 	/* Defer clearing the connection parameters until the connection
3110 	 * closes, so they can be kept if a re-pairing happens.
3111 	 */
3112 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3113
3114 /* Disable auto-connection parameters if present */
3115 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3116 if (params) {
3117 if (params->explicit_connect)
3118 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3119 else
3120 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3121 }
3122
3123 /* If disconnection is not requested, then clear the connection
3124 * variable so that the link is not terminated.
3125 */
3126 if (!cp->disconnect)
3127 conn = NULL;
3128
3129 done:
3130 /* If the connection variable is set, then termination of the
3131 * link is requested.
3132 */
3133 if (!conn) {
3134 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3135 &rp, sizeof(rp));
3136 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3137 goto unlock;
3138 }
3139
3140 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3141 sizeof(*cp));
3142 if (!cmd) {
3143 err = -ENOMEM;
3144 goto unlock;
3145 }
3146
3147 cmd->cmd_complete = addr_cmd_complete;
3148
3149 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3150 unpair_device_complete);
3151 if (err < 0)
3152 mgmt_pending_free(cmd);
3153
3154 unlock:
3155 hci_dev_unlock(hdev);
3156 return err;
3157 }
3158
3159 static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
3160 {
3161 struct mgmt_pending_cmd *cmd = data;
3162
3163 cmd->cmd_complete(cmd, mgmt_status(err));
3164 mgmt_pending_free(cmd);
3165 }
3166
3167 static int disconnect_sync(struct hci_dev *hdev, void *data)
3168 {
3169 struct mgmt_pending_cmd *cmd = data;
3170 struct mgmt_cp_disconnect *cp = cmd->param;
3171 struct hci_conn *conn;
3172
3173 if (cp->addr.type == BDADDR_BREDR)
3174 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3175 &cp->addr.bdaddr);
3176 else
3177 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3178 le_addr_type(cp->addr.type));
3179
3180 if (!conn)
3181 return -ENOTCONN;
3182
3183 /* Disregard any possible error since the likes of hci_abort_conn_sync
3184 * will clean up the connection no matter the error.
3185 */
3186 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3187
3188 return 0;
3189 }
3190
3191 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3192 u16 len)
3193 {
3194 struct mgmt_cp_disconnect *cp = data;
3195 struct mgmt_rp_disconnect rp;
3196 struct mgmt_pending_cmd *cmd;
3197 int err;
3198
3199 bt_dev_dbg(hdev, "sock %p", sk);
3200
3201 memset(&rp, 0, sizeof(rp));
3202 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3203 rp.addr.type = cp->addr.type;
3204
3205 if (!bdaddr_type_is_valid(cp->addr.type))
3206 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3207 MGMT_STATUS_INVALID_PARAMS,
3208 &rp, sizeof(rp));
3209
3210 hci_dev_lock(hdev);
3211
3212 if (!test_bit(HCI_UP, &hdev->flags)) {
3213 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3214 MGMT_STATUS_NOT_POWERED, &rp,
3215 sizeof(rp));
3216 goto failed;
3217 }
3218
3219 cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3220 if (!cmd) {
3221 err = -ENOMEM;
3222 goto failed;
3223 }
3224
3225 cmd->cmd_complete = generic_cmd_complete;
3226
3227 err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
3228 disconnect_complete);
3229 if (err < 0)
3230 mgmt_pending_free(cmd);
3231
3232 failed:
3233 hci_dev_unlock(hdev);
3234 return err;
3235 }
3236
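/* Map an HCI link type and address type to the corresponding mgmt API
 * address type (BDADDR_BREDR, BDADDR_LE_PUBLIC or BDADDR_LE_RANDOM).
 */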
3237 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3238 {
3239 switch (link_type) {
3240 case CIS_LINK:
3241 case BIS_LINK:
3242 case LE_LINK:
3243 switch (addr_type) {
3244 case ADDR_LE_DEV_PUBLIC:
3245 return BDADDR_LE_PUBLIC;
3246
3247 default:
3248 /* Fallback to LE Random address type */
3249 return BDADDR_LE_RANDOM;
3250 }
3251
3252 default:
3253 /* Fallback to BR/EDR type */
3254 return BDADDR_BREDR;
3255 }
3256 }
3257
3258 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3259 u16 data_len)
3260 {
3261 struct mgmt_rp_get_connections *rp;
3262 struct hci_conn *c;
3263 int err;
3264 u16 i;
3265
3266 bt_dev_dbg(hdev, "sock %p", sk);
3267
3268 hci_dev_lock(hdev);
3269
3270 if (!hdev_is_powered(hdev)) {
3271 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3272 MGMT_STATUS_NOT_POWERED);
3273 goto unlock;
3274 }
3275
3276 i = 0;
3277 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3278 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3279 i++;
3280 }
3281
3282 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3283 if (!rp) {
3284 err = -ENOMEM;
3285 goto unlock;
3286 }
3287
3288 i = 0;
3289 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3290 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3291 continue;
3292 bacpy(&rp->addr[i].bdaddr, &c->dst);
3293 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3294 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3295 continue;
3296 i++;
3297 }
3298
3299 rp->conn_count = cpu_to_le16(i);
3300
3301 /* Recalculate length in case of filtered SCO connections, etc */
3302 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3303 struct_size(rp, addr, i));
3304
3305 kfree(rp);
3306
3307 unlock:
3308 hci_dev_unlock(hdev);
3309 return err;
3310 }
3311
3312 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3313 struct mgmt_cp_pin_code_neg_reply *cp)
3314 {
3315 struct mgmt_pending_cmd *cmd;
3316 int err;
3317
3318 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3319 sizeof(*cp));
3320 if (!cmd)
3321 return -ENOMEM;
3322
3323 cmd->cmd_complete = addr_cmd_complete;
3324
3325 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3326 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3327 if (err < 0)
3328 mgmt_pending_remove(cmd);
3329
3330 return err;
3331 }
3332
3333 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3334 u16 len)
3335 {
3336 struct hci_conn *conn;
3337 struct mgmt_cp_pin_code_reply *cp = data;
3338 struct hci_cp_pin_code_reply reply;
3339 struct mgmt_pending_cmd *cmd;
3340 int err;
3341
3342 bt_dev_dbg(hdev, "sock %p", sk);
3343
3344 hci_dev_lock(hdev);
3345
3346 if (!hdev_is_powered(hdev)) {
3347 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3348 MGMT_STATUS_NOT_POWERED);
3349 goto failed;
3350 }
3351
3352 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3353 if (!conn) {
3354 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3355 MGMT_STATUS_NOT_CONNECTED);
3356 goto failed;
3357 }
3358
3359 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3360 struct mgmt_cp_pin_code_neg_reply ncp;
3361
3362 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3363
3364 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3365
3366 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3367 if (err >= 0)
3368 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3369 MGMT_STATUS_INVALID_PARAMS);
3370
3371 goto failed;
3372 }
3373
3374 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3375 if (!cmd) {
3376 err = -ENOMEM;
3377 goto failed;
3378 }
3379
3380 cmd->cmd_complete = addr_cmd_complete;
3381
3382 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3383 reply.pin_len = cp->pin_len;
3384 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3385
3386 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3387 if (err < 0)
3388 mgmt_pending_remove(cmd);
3389
3390 failed:
3391 hci_dev_unlock(hdev);
3392 return err;
3393 }
3394
3395 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3396 u16 len)
3397 {
3398 struct mgmt_cp_set_io_capability *cp = data;
3399
3400 bt_dev_dbg(hdev, "sock %p", sk);
3401
3402 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3403 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3404 MGMT_STATUS_INVALID_PARAMS);
3405
3406 hci_dev_lock(hdev);
3407
3408 hdev->io_capability = cp->io_capability;
3409
3410 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3411
3412 hci_dev_unlock(hdev);
3413
3414 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3415 NULL, 0);
3416 }
3417
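/* Look up the pending MGMT_OP_PAIR_DEVICE command, if any, that is
 * tracking the given connection.
 */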
3418 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3419 {
3420 struct hci_dev *hdev = conn->hdev;
3421 struct mgmt_pending_cmd *cmd;
3422
3423 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3424 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3425 continue;
3426
3427 if (cmd->user_data != conn)
3428 continue;
3429
3430 return cmd;
3431 }
3432
3433 return NULL;
3434 }
3435
3436 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3437 {
3438 struct mgmt_rp_pair_device rp;
3439 struct hci_conn *conn = cmd->user_data;
3440 int err;
3441
3442 bacpy(&rp.addr.bdaddr, &conn->dst);
3443 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3444
3445 err = mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_PAIR_DEVICE,
3446 status, &rp, sizeof(rp));
3447
3448 /* So we don't get further callbacks for this connection */
3449 conn->connect_cfm_cb = NULL;
3450 conn->security_cfm_cb = NULL;
3451 conn->disconn_cfm_cb = NULL;
3452
3453 hci_conn_drop(conn);
3454
3455 /* The device is paired so there is no need to remove
3456 * its connection parameters anymore.
3457 */
3458 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3459
3460 hci_conn_put(conn);
3461
3462 return err;
3463 }
3464
3465 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3466 {
3467 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3468 struct mgmt_pending_cmd *cmd;
3469
3470 cmd = find_pairing(conn);
3471 if (cmd) {
3472 cmd->cmd_complete(cmd, status);
3473 mgmt_pending_remove(cmd);
3474 }
3475 }
3476
3477 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3478 {
3479 struct mgmt_pending_cmd *cmd;
3480
3481 BT_DBG("status %u", status);
3482
3483 cmd = find_pairing(conn);
3484 if (!cmd) {
3485 BT_DBG("Unable to find a pending command");
3486 return;
3487 }
3488
3489 cmd->cmd_complete(cmd, mgmt_status(status));
3490 mgmt_pending_remove(cmd);
3491 }
3492
3493 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3494 {
3495 struct mgmt_pending_cmd *cmd;
3496
3497 BT_DBG("status %u", status);
3498
3499 if (!status)
3500 return;
3501
3502 cmd = find_pairing(conn);
3503 if (!cmd) {
3504 BT_DBG("Unable to find a pending command");
3505 return;
3506 }
3507
3508 cmd->cmd_complete(cmd, mgmt_status(status));
3509 mgmt_pending_remove(cmd);
3510 }
3511
3512 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3513 u16 len)
3514 {
3515 struct mgmt_cp_pair_device *cp = data;
3516 struct mgmt_rp_pair_device rp;
3517 struct mgmt_pending_cmd *cmd;
3518 u8 sec_level, auth_type;
3519 struct hci_conn *conn;
3520 int err;
3521
3522 bt_dev_dbg(hdev, "sock %p", sk);
3523
3524 memset(&rp, 0, sizeof(rp));
3525 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3526 rp.addr.type = cp->addr.type;
3527
3528 if (!bdaddr_type_is_valid(cp->addr.type))
3529 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3530 MGMT_STATUS_INVALID_PARAMS,
3531 &rp, sizeof(rp));
3532
3533 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3534 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3535 MGMT_STATUS_INVALID_PARAMS,
3536 &rp, sizeof(rp));
3537
3538 hci_dev_lock(hdev);
3539
3540 if (!hdev_is_powered(hdev)) {
3541 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3542 MGMT_STATUS_NOT_POWERED, &rp,
3543 sizeof(rp));
3544 goto unlock;
3545 }
3546
3547 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3548 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3549 MGMT_STATUS_ALREADY_PAIRED, &rp,
3550 sizeof(rp));
3551 goto unlock;
3552 }
3553
3554 sec_level = BT_SECURITY_MEDIUM;
3555 auth_type = HCI_AT_DEDICATED_BONDING;
3556
3557 if (cp->addr.type == BDADDR_BREDR) {
3558 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3559 auth_type, CONN_REASON_PAIR_DEVICE,
3560 HCI_ACL_CONN_TIMEOUT);
3561 } else {
3562 u8 addr_type = le_addr_type(cp->addr.type);
3563 struct hci_conn_params *p;
3564
3565 /* When pairing a new device, it is expected to remember
3566 * this device for future connections. Adding the connection
3567 * parameter information ahead of time allows tracking
3568 * of the peripheral preferred values and will speed up any
3569 * further connection establishment.
3570 *
3571 * If connection parameters already exist, then they
3572 * will be kept and this function does nothing.
3573 */
3574 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3575 if (!p) {
3576 err = -EIO;
3577 goto unlock;
3578 }
3579
3580 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3581 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3582
3583 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3584 sec_level, HCI_LE_CONN_TIMEOUT,
3585 CONN_REASON_PAIR_DEVICE);
3586 }
3587
3588 if (IS_ERR(conn)) {
3589 int status;
3590
3591 if (PTR_ERR(conn) == -EBUSY)
3592 status = MGMT_STATUS_BUSY;
3593 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3594 status = MGMT_STATUS_NOT_SUPPORTED;
3595 else if (PTR_ERR(conn) == -ECONNREFUSED)
3596 status = MGMT_STATUS_REJECTED;
3597 else
3598 status = MGMT_STATUS_CONNECT_FAILED;
3599
3600 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3601 status, &rp, sizeof(rp));
3602 goto unlock;
3603 }
3604
3605 if (conn->connect_cfm_cb) {
3606 hci_conn_drop(conn);
3607 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3608 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3609 goto unlock;
3610 }
3611
3612 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3613 if (!cmd) {
3614 err = -ENOMEM;
3615 hci_conn_drop(conn);
3616 goto unlock;
3617 }
3618
3619 cmd->cmd_complete = pairing_complete;
3620
3621 /* For LE, just connecting isn't a proof that the pairing finished */
3622 if (cp->addr.type == BDADDR_BREDR) {
3623 conn->connect_cfm_cb = pairing_complete_cb;
3624 conn->security_cfm_cb = pairing_complete_cb;
3625 conn->disconn_cfm_cb = pairing_complete_cb;
3626 } else {
3627 conn->connect_cfm_cb = le_pairing_complete_cb;
3628 conn->security_cfm_cb = le_pairing_complete_cb;
3629 conn->disconn_cfm_cb = le_pairing_complete_cb;
3630 }
3631
3632 conn->io_capability = cp->io_cap;
3633 cmd->user_data = hci_conn_get(conn);
3634
3635 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3636 hci_conn_security(conn, sec_level, auth_type, true)) {
3637 cmd->cmd_complete(cmd, 0);
3638 mgmt_pending_remove(cmd);
3639 }
3640
3641 err = 0;
3642
3643 unlock:
3644 hci_dev_unlock(hdev);
3645 return err;
3646 }
3647
3648 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3649 u16 len)
3650 {
3651 struct mgmt_addr_info *addr = data;
3652 struct mgmt_pending_cmd *cmd;
3653 struct hci_conn *conn;
3654 int err;
3655
3656 bt_dev_dbg(hdev, "sock %p", sk);
3657
3658 hci_dev_lock(hdev);
3659
3660 if (!hdev_is_powered(hdev)) {
3661 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3662 MGMT_STATUS_NOT_POWERED);
3663 goto unlock;
3664 }
3665
3666 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3667 if (!cmd) {
3668 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3669 MGMT_STATUS_INVALID_PARAMS);
3670 goto unlock;
3671 }
3672
3673 conn = cmd->user_data;
3674
3675 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3676 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3677 MGMT_STATUS_INVALID_PARAMS);
3678 goto unlock;
3679 }
3680
3681 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3682 mgmt_pending_remove(cmd);
3683
3684 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3685 addr, sizeof(*addr));
3686
3687 /* Since user doesn't want to proceed with the connection, abort any
3688 * ongoing pairing and then terminate the link if it was created
3689 * because of the pair device action.
3690 */
3691 if (addr->type == BDADDR_BREDR)
3692 hci_remove_link_key(hdev, &addr->bdaddr);
3693 else
3694 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3695 le_addr_type(addr->type));
3696
3697 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3698 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3699
3700 unlock:
3701 hci_dev_unlock(hdev);
3702 return err;
3703 }
3704
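/* Common handler for the user confirm/passkey (negative) replies: LE
 * replies are handed to SMP and answered immediately, while BR/EDR
 * replies are forwarded to the controller as HCI commands with a
 * pending mgmt command tracking their completion.
 */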
3705 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3706 struct mgmt_addr_info *addr, u16 mgmt_op,
3707 u16 hci_op, __le32 passkey)
3708 {
3709 struct mgmt_pending_cmd *cmd;
3710 struct hci_conn *conn;
3711 int err;
3712
3713 hci_dev_lock(hdev);
3714
3715 if (!hdev_is_powered(hdev)) {
3716 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3717 MGMT_STATUS_NOT_POWERED, addr,
3718 sizeof(*addr));
3719 goto done;
3720 }
3721
3722 if (addr->type == BDADDR_BREDR)
3723 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3724 else
3725 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3726 le_addr_type(addr->type));
3727
3728 if (!conn) {
3729 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3730 MGMT_STATUS_NOT_CONNECTED, addr,
3731 sizeof(*addr));
3732 goto done;
3733 }
3734
3735 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3736 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3737 if (!err)
3738 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3739 MGMT_STATUS_SUCCESS, addr,
3740 sizeof(*addr));
3741 else
3742 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3743 MGMT_STATUS_FAILED, addr,
3744 sizeof(*addr));
3745
3746 goto done;
3747 }
3748
3749 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3750 if (!cmd) {
3751 err = -ENOMEM;
3752 goto done;
3753 }
3754
3755 cmd->cmd_complete = addr_cmd_complete;
3756
3757 /* Continue with pairing via HCI */
3758 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3759 struct hci_cp_user_passkey_reply cp;
3760
3761 bacpy(&cp.bdaddr, &addr->bdaddr);
3762 cp.passkey = passkey;
3763 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3764 	} else {
3765 		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3766 				   &addr->bdaddr);
	}
3767
3768 if (err < 0)
3769 mgmt_pending_remove(cmd);
3770
3771 done:
3772 hci_dev_unlock(hdev);
3773 return err;
3774 }
3775
3776 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3777 void *data, u16 len)
3778 {
3779 struct mgmt_cp_pin_code_neg_reply *cp = data;
3780
3781 bt_dev_dbg(hdev, "sock %p", sk);
3782
3783 return user_pairing_resp(sk, hdev, &cp->addr,
3784 MGMT_OP_PIN_CODE_NEG_REPLY,
3785 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3786 }
3787
3788 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3789 u16 len)
3790 {
3791 struct mgmt_cp_user_confirm_reply *cp = data;
3792
3793 bt_dev_dbg(hdev, "sock %p", sk);
3794
3795 if (len != sizeof(*cp))
3796 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3797 MGMT_STATUS_INVALID_PARAMS);
3798
3799 return user_pairing_resp(sk, hdev, &cp->addr,
3800 MGMT_OP_USER_CONFIRM_REPLY,
3801 HCI_OP_USER_CONFIRM_REPLY, 0);
3802 }
3803
3804 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3805 void *data, u16 len)
3806 {
3807 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3808
3809 bt_dev_dbg(hdev, "sock %p", sk);
3810
3811 return user_pairing_resp(sk, hdev, &cp->addr,
3812 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3813 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3814 }
3815
3816 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3817 u16 len)
3818 {
3819 struct mgmt_cp_user_passkey_reply *cp = data;
3820
3821 bt_dev_dbg(hdev, "sock %p", sk);
3822
3823 return user_pairing_resp(sk, hdev, &cp->addr,
3824 MGMT_OP_USER_PASSKEY_REPLY,
3825 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3826 }
3827
3828 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3829 void *data, u16 len)
3830 {
3831 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3832
3833 bt_dev_dbg(hdev, "sock %p", sk);
3834
3835 return user_pairing_resp(sk, hdev, &cp->addr,
3836 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3837 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3838 }
3839
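/* If the current advertising instance advertises data controlled by any
 * of the given flags (e.g. local name or appearance), cancel its timeout
 * and schedule the next instance so that the updated data gets used.
 */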
3840 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3841 {
3842 struct adv_info *adv_instance;
3843
3844 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3845 if (!adv_instance)
3846 return 0;
3847
3848 /* stop if current instance doesn't need to be changed */
3849 if (!(adv_instance->flags & flags))
3850 return 0;
3851
3852 cancel_adv_timeout(hdev);
3853
3854 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3855 if (!adv_instance)
3856 return 0;
3857
3858 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3859
3860 return 0;
3861 }
3862
3863 static int name_changed_sync(struct hci_dev *hdev, void *data)
3864 {
3865 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3866 }
3867
3868 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3869 {
3870 struct mgmt_pending_cmd *cmd = data;
3871 struct mgmt_cp_set_local_name *cp = cmd->param;
3872 u8 status = mgmt_status(err);
3873
3874 bt_dev_dbg(hdev, "err %d", err);
3875
3876 if (err == -ECANCELED ||
3877 cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3878 return;
3879
3880 if (status) {
3881 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3882 status);
3883 } else {
3884 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3885 cp, sizeof(*cp));
3886
3887 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3888 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3889 }
3890
3891 mgmt_pending_remove(cmd);
3892 }
3893
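/* Apply the new local name to the controller: BR/EDR carries the name
 * in the EIR data, while LE carries it in the scan response data.
 */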
3894 static int set_name_sync(struct hci_dev *hdev, void *data)
3895 {
3896 if (lmp_bredr_capable(hdev)) {
3897 hci_update_name_sync(hdev);
3898 hci_update_eir_sync(hdev);
3899 }
3900
3901 /* The name is stored in the scan response data, so there is
3902 * no need to update the advertising data here.
3903 */
3904 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3905 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3906
3907 return 0;
3908 }
3909
3910 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3911 u16 len)
3912 {
3913 struct mgmt_cp_set_local_name *cp = data;
3914 struct mgmt_pending_cmd *cmd;
3915 int err;
3916
3917 bt_dev_dbg(hdev, "sock %p", sk);
3918
3919 hci_dev_lock(hdev);
3920
3921 /* If the old values are the same as the new ones just return a
3922 * direct command complete event.
3923 */
3924 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3925 !memcmp(hdev->short_name, cp->short_name,
3926 sizeof(hdev->short_name))) {
3927 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3928 data, len);
3929 goto failed;
3930 }
3931
3932 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3933
3934 if (!hdev_is_powered(hdev)) {
3935 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3936
3937 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3938 data, len);
3939 if (err < 0)
3940 goto failed;
3941
3942 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3943 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3944 ext_info_changed(hdev, sk);
3945
3946 goto failed;
3947 }
3948
3949 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3950 if (!cmd)
3951 err = -ENOMEM;
3952 else
3953 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3954 set_name_complete);
3955
3956 if (err < 0) {
3957 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3958 MGMT_STATUS_FAILED);
3959
3960 if (cmd)
3961 mgmt_pending_remove(cmd);
3962
3963 goto failed;
3964 }
3965
3966 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3967
3968 failed:
3969 hci_dev_unlock(hdev);
3970 return err;
3971 }
3972
3973 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3974 {
3975 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3976 }
3977
3978 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3979 u16 len)
3980 {
3981 struct mgmt_cp_set_appearance *cp = data;
3982 u16 appearance;
3983 int err;
3984
3985 bt_dev_dbg(hdev, "sock %p", sk);
3986
3987 if (!lmp_le_capable(hdev))
3988 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3989 MGMT_STATUS_NOT_SUPPORTED);
3990
3991 appearance = le16_to_cpu(cp->appearance);
3992
3993 hci_dev_lock(hdev);
3994
3995 if (hdev->appearance != appearance) {
3996 hdev->appearance = appearance;
3997
3998 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3999 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
4000 NULL);
4001
4002 ext_info_changed(hdev, sk);
4003 }
4004
4005 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
4006 0);
4007
4008 hci_dev_unlock(hdev);
4009
4010 return err;
4011 }
4012
4013 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4014 void *data, u16 len)
4015 {
4016 struct mgmt_rp_get_phy_configuration rp;
4017
4018 bt_dev_dbg(hdev, "sock %p", sk);
4019
4020 hci_dev_lock(hdev);
4021
4022 memset(&rp, 0, sizeof(rp));
4023
4024 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
4025 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
4026 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
4027
4028 hci_dev_unlock(hdev);
4029
4030 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
4031 &rp, sizeof(rp));
4032 }
4033
4034 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
4035 {
4036 struct mgmt_ev_phy_configuration_changed ev;
4037
4038 memset(&ev, 0, sizeof(ev));
4039
4040 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
4041
4042 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
4043 sizeof(ev), skip);
4044 }
4045
4046 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
4047 {
4048 struct mgmt_pending_cmd *cmd = data;
4049 struct sk_buff *skb = cmd->skb;
4050 u8 status = mgmt_status(err);
4051
4052 if (err == -ECANCELED ||
4053 cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
4054 return;
4055
4056 if (!status) {
4057 if (!skb)
4058 status = MGMT_STATUS_FAILED;
4059 else if (IS_ERR(skb))
4060 status = mgmt_status(PTR_ERR(skb));
4061 else
4062 status = mgmt_status(skb->data[0]);
4063 }
4064
4065 bt_dev_dbg(hdev, "status %d", status);
4066
4067 if (status) {
4068 mgmt_cmd_status(cmd->sk, hdev->id,
4069 MGMT_OP_SET_PHY_CONFIGURATION, status);
4070 } else {
4071 mgmt_cmd_complete(cmd->sk, hdev->id,
4072 MGMT_OP_SET_PHY_CONFIGURATION, 0,
4073 NULL, 0);
4074
4075 mgmt_phy_configuration_changed(hdev, cmd->sk);
4076 }
4077
4078 if (skb && !IS_ERR(skb))
4079 kfree_skb(skb);
4080
4081 mgmt_pending_remove(cmd);
4082 }
4083
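/* Translate the MGMT PHY selection into the HCI LE Set Default PHY
 * command. The all_phys octet tells the controller that the host has
 * no preference for the TX and/or RX direction whenever no PHY was
 * selected for it.
 */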
4084 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4085 {
4086 struct mgmt_pending_cmd *cmd = data;
4087 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4088 struct hci_cp_le_set_default_phy cp_phy;
4089 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4090
4091 memset(&cp_phy, 0, sizeof(cp_phy));
4092
4093 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4094 cp_phy.all_phys |= 0x01;
4095
4096 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4097 cp_phy.all_phys |= 0x02;
4098
4099 if (selected_phys & MGMT_PHY_LE_1M_TX)
4100 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4101
4102 if (selected_phys & MGMT_PHY_LE_2M_TX)
4103 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4104
4105 if (selected_phys & MGMT_PHY_LE_CODED_TX)
4106 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4107
4108 if (selected_phys & MGMT_PHY_LE_1M_RX)
4109 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4110
4111 if (selected_phys & MGMT_PHY_LE_2M_RX)
4112 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4113
4114 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4115 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4116
4117 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4118 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4119
4120 return 0;
4121 }
4122
4123 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4124 void *data, u16 len)
4125 {
4126 struct mgmt_cp_set_phy_configuration *cp = data;
4127 struct mgmt_pending_cmd *cmd;
4128 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4129 u16 pkt_type = (HCI_DH1 | HCI_DM1);
4130 bool changed = false;
4131 int err;
4132
4133 bt_dev_dbg(hdev, "sock %p", sk);
4134
4135 configurable_phys = get_configurable_phys(hdev);
4136 supported_phys = get_supported_phys(hdev);
4137 selected_phys = __le32_to_cpu(cp->selected_phys);
4138
4139 if (selected_phys & ~supported_phys)
4140 return mgmt_cmd_status(sk, hdev->id,
4141 MGMT_OP_SET_PHY_CONFIGURATION,
4142 MGMT_STATUS_INVALID_PARAMS);
4143
4144 unconfigure_phys = supported_phys & ~configurable_phys;
4145
4146 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4147 return mgmt_cmd_status(sk, hdev->id,
4148 MGMT_OP_SET_PHY_CONFIGURATION,
4149 MGMT_STATUS_INVALID_PARAMS);
4150
4151 if (selected_phys == get_selected_phys(hdev))
4152 return mgmt_cmd_complete(sk, hdev->id,
4153 MGMT_OP_SET_PHY_CONFIGURATION,
4154 0, NULL, 0);
4155
4156 hci_dev_lock(hdev);
4157
4158 if (!hdev_is_powered(hdev)) {
4159 err = mgmt_cmd_status(sk, hdev->id,
4160 MGMT_OP_SET_PHY_CONFIGURATION,
4161 MGMT_STATUS_REJECTED);
4162 goto unlock;
4163 }
4164
4165 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4166 err = mgmt_cmd_status(sk, hdev->id,
4167 MGMT_OP_SET_PHY_CONFIGURATION,
4168 MGMT_STATUS_BUSY);
4169 goto unlock;
4170 }
4171
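/* Map the BR/EDR PHY selection onto the ACL packet type mask. The EDR
 * bits (HCI_2DHx/HCI_3DHx) are "shall not be used" bits as defined for
 * the HCI Create Connection command, so their logic is inverted
 * compared to the basic rate 3- and 5-slot bits.
 */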
4172 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4173 pkt_type |= (HCI_DH3 | HCI_DM3);
4174 else
4175 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4176
4177 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4178 pkt_type |= (HCI_DH5 | HCI_DM5);
4179 else
4180 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4181
4182 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4183 pkt_type &= ~HCI_2DH1;
4184 else
4185 pkt_type |= HCI_2DH1;
4186
4187 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4188 pkt_type &= ~HCI_2DH3;
4189 else
4190 pkt_type |= HCI_2DH3;
4191
4192 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4193 pkt_type &= ~HCI_2DH5;
4194 else
4195 pkt_type |= HCI_2DH5;
4196
4197 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4198 pkt_type &= ~HCI_3DH1;
4199 else
4200 pkt_type |= HCI_3DH1;
4201
4202 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4203 pkt_type &= ~HCI_3DH3;
4204 else
4205 pkt_type |= HCI_3DH3;
4206
4207 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4208 pkt_type &= ~HCI_3DH5;
4209 else
4210 pkt_type |= HCI_3DH5;
4211
4212 if (pkt_type != hdev->pkt_type) {
4213 hdev->pkt_type = pkt_type;
4214 changed = true;
4215 }
4216
4217 if ((selected_phys & MGMT_PHY_LE_MASK) ==
4218 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4219 if (changed)
4220 mgmt_phy_configuration_changed(hdev, sk);
4221
4222 err = mgmt_cmd_complete(sk, hdev->id,
4223 MGMT_OP_SET_PHY_CONFIGURATION,
4224 0, NULL, 0);
4225
4226 goto unlock;
4227 }
4228
4229 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4230 len);
4231 if (!cmd)
4232 err = -ENOMEM;
4233 else
4234 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4235 set_default_phy_complete);
4236
4237 if (err < 0) {
4238 err = mgmt_cmd_status(sk, hdev->id,
4239 MGMT_OP_SET_PHY_CONFIGURATION,
4240 MGMT_STATUS_FAILED);
4241
4242 if (cmd)
4243 mgmt_pending_remove(cmd);
4244 }
4245
4246 unlock:
4247 hci_dev_unlock(hdev);
4248
4249 return err;
4250 }
4251
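/* Replace the controller's list of blocked keys with the list supplied
 * by userspace, so that known-compromised values (link keys, LTKs or
 * IRKs) can be rejected by the key handling code.
 */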
4252 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4253 u16 len)
4254 {
4255 int err = MGMT_STATUS_SUCCESS;
4256 struct mgmt_cp_set_blocked_keys *keys = data;
4257 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4258 sizeof(struct mgmt_blocked_key_info));
4259 u16 key_count, expected_len;
4260 int i;
4261
4262 bt_dev_dbg(hdev, "sock %p", sk);
4263
4264 key_count = __le16_to_cpu(keys->key_count);
4265 if (key_count > max_key_count) {
4266 bt_dev_err(hdev, "too big key_count value %u", key_count);
4267 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4268 MGMT_STATUS_INVALID_PARAMS);
4269 }
4270
4271 expected_len = struct_size(keys, keys, key_count);
4272 if (expected_len != len) {
4273 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4274 expected_len, len);
4275 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4276 MGMT_STATUS_INVALID_PARAMS);
4277 }
4278
4279 hci_dev_lock(hdev);
4280
4281 hci_blocked_keys_clear(hdev);
4282
4283 for (i = 0; i < key_count; ++i) {
4284 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4285
4286 if (!b) {
4287 err = MGMT_STATUS_NO_RESOURCES;
4288 break;
4289 }
4290
4291 b->type = keys->keys[i].type;
4292 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4293 list_add_rcu(&b->list, &hdev->blocked_keys);
4294 }
4295 hci_dev_unlock(hdev);
4296
4297 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4298 err, NULL, 0);
4299 }
4300
4301 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4302 void *data, u16 len)
4303 {
4304 struct mgmt_mode *cp = data;
4305 int err;
4306 bool changed = false;
4307
4308 bt_dev_dbg(hdev, "sock %p", sk);
4309
4310 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4311 return mgmt_cmd_status(sk, hdev->id,
4312 MGMT_OP_SET_WIDEBAND_SPEECH,
4313 MGMT_STATUS_NOT_SUPPORTED);
4314
4315 if (cp->val != 0x00 && cp->val != 0x01)
4316 return mgmt_cmd_status(sk, hdev->id,
4317 MGMT_OP_SET_WIDEBAND_SPEECH,
4318 MGMT_STATUS_INVALID_PARAMS);
4319
4320 hci_dev_lock(hdev);
4321
4322 if (hdev_is_powered(hdev) &&
4323 !!cp->val != hci_dev_test_flag(hdev,
4324 HCI_WIDEBAND_SPEECH_ENABLED)) {
4325 err = mgmt_cmd_status(sk, hdev->id,
4326 MGMT_OP_SET_WIDEBAND_SPEECH,
4327 MGMT_STATUS_REJECTED);
4328 goto unlock;
4329 }
4330
4331 if (cp->val)
4332 changed = !hci_dev_test_and_set_flag(hdev,
4333 HCI_WIDEBAND_SPEECH_ENABLED);
4334 else
4335 changed = hci_dev_test_and_clear_flag(hdev,
4336 HCI_WIDEBAND_SPEECH_ENABLED);
4337
4338 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4339 if (err < 0)
4340 goto unlock;
4341
4342 if (changed)
4343 err = new_settings(hdev, sk);
4344
4345 unlock:
4346 hci_dev_unlock(hdev);
4347 return err;
4348 }
4349
4350 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4351 void *data, u16 data_len)
4352 {
4353 char buf[20];
4354 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4355 u16 cap_len = 0;
4356 u8 flags = 0;
4357 u8 tx_power_range[2];
4358
4359 bt_dev_dbg(hdev, "sock %p", sk);
4360
4361 memset(&buf, 0, sizeof(buf));
4362
4363 hci_dev_lock(hdev);
4364
4365 /* When the Read Simple Pairing Options command is supported, then
4366 * the remote public key validation is supported.
4367 *
4368 * Alternatively, when Microsoft extensions are available, they can
4369 * indicate support for public key validation as well.
4370 */
4371 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4372 flags |= 0x01; /* Remote public key validation (BR/EDR) */
4373
4374 flags |= 0x02; /* Remote public key validation (LE) */
4375
4376 /* When the Read Encryption Key Size command is supported, then the
4377 * encryption key size is enforced.
4378 */
4379 if (hdev->commands[20] & 0x10)
4380 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
4381
4382 flags |= 0x08; /* Encryption key size enforcement (LE) */
4383
4384 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4385 &flags, 1);
4386
4387 /* When the Read Simple Pairing Options command is supported, the
4388 * maximum encryption key size information is also provided.
4389 */
4390 if (hdev->commands[41] & 0x08)
4391 cap_len = eir_append_le16(rp->cap, cap_len,
4392 MGMT_CAP_MAX_ENC_KEY_SIZE,
4393 hdev->max_enc_key_size);
4394
4395 cap_len = eir_append_le16(rp->cap, cap_len,
4396 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4397 SMP_MAX_ENC_KEY_SIZE);
4398
4399 /* Append the min/max LE tx power parameters if we were able to fetch
4400 * them from the controller.
4401 */
4402 if (hdev->commands[38] & 0x80) {
4403 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4404 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4405 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4406 tx_power_range, 2);
4407 }
4408
4409 rp->cap_len = cpu_to_le16(cap_len);
4410
4411 hci_dev_unlock(hdev);
4412
4413 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4414 rp, sizeof(*rp) + cap_len);
4415 }
4416
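/* The experimental feature UUIDs below are stored in little-endian byte
 * order, i.e. reversed with respect to their canonical string form.
 */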
4417 #ifdef CONFIG_BT_FEATURE_DEBUG
4418 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4419 static const u8 debug_uuid[16] = {
4420 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4421 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4422 };
4423 #endif
4424
4425 /* 330859bc-7506-492d-9370-9a6f0614037f */
4426 static const u8 quality_report_uuid[16] = {
4427 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4428 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4429 };
4430
4431 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4432 static const u8 offload_codecs_uuid[16] = {
4433 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4434 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4435 };
4436
4437 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4438 static const u8 le_simultaneous_roles_uuid[16] = {
4439 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4440 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4441 };
4442
4443 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4444 static const u8 iso_socket_uuid[16] = {
4445 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4446 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4447 };
4448
4449 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4450 static const u8 mgmt_mesh_uuid[16] = {
4451 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4452 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4453 };
4454
4455 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4456 void *data, u16 data_len)
4457 {
4458 struct mgmt_rp_read_exp_features_info *rp;
4459 size_t len;
4460 u16 idx = 0;
4461 u32 flags;
4462 int status;
4463
4464 bt_dev_dbg(hdev, "sock %p", sk);
4465
4466 /* Enough space for 7 features */
4467 len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4468 rp = kzalloc(len, GFP_KERNEL);
4469 if (!rp)
4470 return -ENOMEM;
4471
4472 #ifdef CONFIG_BT_FEATURE_DEBUG
4473 if (!hdev) {
4474 flags = bt_dbg_get() ? BIT(0) : 0;
4475
4476 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4477 rp->features[idx].flags = cpu_to_le32(flags);
4478 idx++;
4479 }
4480 #endif
4481
4482 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4483 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4484 flags = BIT(0);
4485 else
4486 flags = 0;
4487
4488 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4489 rp->features[idx].flags = cpu_to_le32(flags);
4490 idx++;
4491 }
4492
4493 if (hdev && (aosp_has_quality_report(hdev) ||
4494 hdev->set_quality_report)) {
4495 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4496 flags = BIT(0);
4497 else
4498 flags = 0;
4499
4500 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4501 rp->features[idx].flags = cpu_to_le32(flags);
4502 idx++;
4503 }
4504
4505 if (hdev && hdev->get_data_path_id) {
4506 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4507 flags = BIT(0);
4508 else
4509 flags = 0;
4510
4511 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4512 rp->features[idx].flags = cpu_to_le32(flags);
4513 idx++;
4514 }
4515
4516 if (IS_ENABLED(CONFIG_BT_LE)) {
4517 flags = iso_enabled() ? BIT(0) : 0;
4518 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4519 rp->features[idx].flags = cpu_to_le32(flags);
4520 idx++;
4521 }
4522
4523 if (hdev && lmp_le_capable(hdev)) {
4524 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4525 flags = BIT(0);
4526 else
4527 flags = 0;
4528
4529 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4530 rp->features[idx].flags = cpu_to_le32(flags);
4531 idx++;
4532 }
4533
4534 rp->feature_count = cpu_to_le16(idx);
4535
4536 /* After reading the experimental features information, enable
4537 * the events to update the client on any future change.
4538 */
4539 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4540
4541 status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4542 MGMT_OP_READ_EXP_FEATURES_INFO,
4543 0, rp, sizeof(*rp) + (20 * idx));
4544
4545 kfree(rp);
4546 return status;
4547 }
4548
4549 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4550 bool enabled, struct sock *skip)
4551 {
4552 struct mgmt_ev_exp_feature_changed ev;
4553
4554 memset(&ev, 0, sizeof(ev));
4555 memcpy(ev.uuid, uuid, 16);
4556 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4557
4558 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4559 &ev, sizeof(ev),
4560 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4561 }
4562
4563 #define EXP_FEAT(_uuid, _set_func) \
4564 { \
4565 .uuid = _uuid, \
4566 .set_func = _set_func, \
4567 }
4568
4569 /* The zero key uuid is special. Multiple exp features are set through it. */
4570 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4571 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4572 {
4573 struct mgmt_rp_set_exp_feature rp;
4574
4575 memset(rp.uuid, 0, 16);
4576 rp.flags = cpu_to_le32(0);
4577
4578 #ifdef CONFIG_BT_FEATURE_DEBUG
4579 if (!hdev) {
4580 bool changed = bt_dbg_get();
4581
4582 bt_dbg_set(false);
4583
4584 if (changed)
4585 exp_feature_changed(NULL, ZERO_KEY, false, sk);
4586 }
4587 #endif
4588
4589 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4590
4591 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4592 MGMT_OP_SET_EXP_FEATURE, 0,
4593 &rp, sizeof(rp));
4594 }
4595
4596 #ifdef CONFIG_BT_FEATURE_DEBUG
4597 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4598 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4599 {
4600 struct mgmt_rp_set_exp_feature rp;
4601
4602 bool val, changed;
4603 int err;
4604
4605 /* This command must be used with the non-controller index */
4606 if (hdev)
4607 return mgmt_cmd_status(sk, hdev->id,
4608 MGMT_OP_SET_EXP_FEATURE,
4609 MGMT_STATUS_INVALID_INDEX);
4610
4611 /* Parameters are limited to a single octet */
4612 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4613 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4614 MGMT_OP_SET_EXP_FEATURE,
4615 MGMT_STATUS_INVALID_PARAMS);
4616
4617 /* Only boolean on/off is supported */
4618 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4619 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4620 MGMT_OP_SET_EXP_FEATURE,
4621 MGMT_STATUS_INVALID_PARAMS);
4622
4623 val = !!cp->param[0];
4624 changed = val ? !bt_dbg_get() : bt_dbg_get();
4625 bt_dbg_set(val);
4626
4627 memcpy(rp.uuid, debug_uuid, 16);
4628 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4629
4630 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4631
4632 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4633 MGMT_OP_SET_EXP_FEATURE, 0,
4634 &rp, sizeof(rp));
4635
4636 if (changed)
4637 exp_feature_changed(hdev, debug_uuid, val, sk);
4638
4639 return err;
4640 }
4641 #endif
4642
4643 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4644 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4645 {
4646 struct mgmt_rp_set_exp_feature rp;
4647 bool val, changed;
4648 int err;
4649
4650 /* This command must be used with a controller index */
4651 if (!hdev)
4652 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4653 MGMT_OP_SET_EXP_FEATURE,
4654 MGMT_STATUS_INVALID_INDEX);
4655
4656 /* Parameters are limited to a single octet */
4657 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4658 return mgmt_cmd_status(sk, hdev->id,
4659 MGMT_OP_SET_EXP_FEATURE,
4660 MGMT_STATUS_INVALID_PARAMS);
4661
4662 /* Only boolean on/off is supported */
4663 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4664 return mgmt_cmd_status(sk, hdev->id,
4665 MGMT_OP_SET_EXP_FEATURE,
4666 MGMT_STATUS_INVALID_PARAMS);
4667
4668 val = !!cp->param[0];
4669
4670 if (val) {
4671 changed = !hci_dev_test_and_set_flag(hdev,
4672 HCI_MESH_EXPERIMENTAL);
4673 } else {
4674 hci_dev_clear_flag(hdev, HCI_MESH);
4675 changed = hci_dev_test_and_clear_flag(hdev,
4676 HCI_MESH_EXPERIMENTAL);
4677 }
4678
4679 memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4680 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4681
4682 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4683
4684 err = mgmt_cmd_complete(sk, hdev->id,
4685 MGMT_OP_SET_EXP_FEATURE, 0,
4686 &rp, sizeof(rp));
4687
4688 if (changed)
4689 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4690
4691 return err;
4692 }
4693
4694 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4695 struct mgmt_cp_set_exp_feature *cp,
4696 u16 data_len)
4697 {
4698 struct mgmt_rp_set_exp_feature rp;
4699 bool val, changed;
4700 int err;
4701
4702 /* This command must be used with a valid controller index */
4703 if (!hdev)
4704 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4705 MGMT_OP_SET_EXP_FEATURE,
4706 MGMT_STATUS_INVALID_INDEX);
4707
4708 /* Parameters are limited to a single octet */
4709 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4710 return mgmt_cmd_status(sk, hdev->id,
4711 MGMT_OP_SET_EXP_FEATURE,
4712 MGMT_STATUS_INVALID_PARAMS);
4713
4714 /* Only boolean on/off is supported */
4715 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4716 return mgmt_cmd_status(sk, hdev->id,
4717 MGMT_OP_SET_EXP_FEATURE,
4718 MGMT_STATUS_INVALID_PARAMS);
4719
4720 hci_req_sync_lock(hdev);
4721
4722 val = !!cp->param[0];
4723 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4724
4725 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4726 err = mgmt_cmd_status(sk, hdev->id,
4727 MGMT_OP_SET_EXP_FEATURE,
4728 MGMT_STATUS_NOT_SUPPORTED);
4729 goto unlock_quality_report;
4730 }
4731
4732 if (changed) {
4733 if (hdev->set_quality_report)
4734 err = hdev->set_quality_report(hdev, val);
4735 else
4736 err = aosp_set_quality_report(hdev, val);
4737
4738 if (err) {
4739 err = mgmt_cmd_status(sk, hdev->id,
4740 MGMT_OP_SET_EXP_FEATURE,
4741 MGMT_STATUS_FAILED);
4742 goto unlock_quality_report;
4743 }
4744
4745 if (val)
4746 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4747 else
4748 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4749 }
4750
4751 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4752
4753 memcpy(rp.uuid, quality_report_uuid, 16);
4754 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4755 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4756
4757 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4758 &rp, sizeof(rp));
4759
4760 if (changed)
4761 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4762
4763 unlock_quality_report:
4764 hci_req_sync_unlock(hdev);
4765 return err;
4766 }
4767
4768 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4769 struct mgmt_cp_set_exp_feature *cp,
4770 u16 data_len)
4771 {
4772 bool val, changed;
4773 int err;
4774 struct mgmt_rp_set_exp_feature rp;
4775
4776 /* This command must be used with a valid controller index */
4777 if (!hdev)
4778 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4779 MGMT_OP_SET_EXP_FEATURE,
4780 MGMT_STATUS_INVALID_INDEX);
4781
4782 /* Parameters are limited to a single octet */
4783 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4784 return mgmt_cmd_status(sk, hdev->id,
4785 MGMT_OP_SET_EXP_FEATURE,
4786 MGMT_STATUS_INVALID_PARAMS);
4787
4788 /* Only boolean on/off is supported */
4789 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4790 return mgmt_cmd_status(sk, hdev->id,
4791 MGMT_OP_SET_EXP_FEATURE,
4792 MGMT_STATUS_INVALID_PARAMS);
4793
4794 val = !!cp->param[0];
4795 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4796
4797 if (!hdev->get_data_path_id) {
4798 return mgmt_cmd_status(sk, hdev->id,
4799 MGMT_OP_SET_EXP_FEATURE,
4800 MGMT_STATUS_NOT_SUPPORTED);
4801 }
4802
4803 if (changed) {
4804 if (val)
4805 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4806 else
4807 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4808 }
4809
4810 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4811 val, changed);
4812
4813 memcpy(rp.uuid, offload_codecs_uuid, 16);
4814 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4815 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4816 err = mgmt_cmd_complete(sk, hdev->id,
4817 MGMT_OP_SET_EXP_FEATURE, 0,
4818 &rp, sizeof(rp));
4819
4820 if (changed)
4821 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4822
4823 return err;
4824 }
4825
4826 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4827 struct mgmt_cp_set_exp_feature *cp,
4828 u16 data_len)
4829 {
4830 bool val, changed;
4831 int err;
4832 struct mgmt_rp_set_exp_feature rp;
4833
4834 /* This command must be used with a valid controller index */
4835 if (!hdev)
4836 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4837 MGMT_OP_SET_EXP_FEATURE,
4838 MGMT_STATUS_INVALID_INDEX);
4839
4840 /* Parameters are limited to a single octet */
4841 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4842 return mgmt_cmd_status(sk, hdev->id,
4843 MGMT_OP_SET_EXP_FEATURE,
4844 MGMT_STATUS_INVALID_PARAMS);
4845
4846 /* Only boolean on/off is supported */
4847 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4848 return mgmt_cmd_status(sk, hdev->id,
4849 MGMT_OP_SET_EXP_FEATURE,
4850 MGMT_STATUS_INVALID_PARAMS);
4851
4852 val = !!cp->param[0];
4853 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4854
4855 if (!hci_dev_le_state_simultaneous(hdev)) {
4856 return mgmt_cmd_status(sk, hdev->id,
4857 MGMT_OP_SET_EXP_FEATURE,
4858 MGMT_STATUS_NOT_SUPPORTED);
4859 }
4860
4861 if (changed) {
4862 if (val)
4863 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4864 else
4865 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4866 }
4867
4868 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4869 val, changed);
4870
4871 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4872 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4873 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4874 err = mgmt_cmd_complete(sk, hdev->id,
4875 MGMT_OP_SET_EXP_FEATURE, 0,
4876 &rp, sizeof(rp));
4877
4878 if (changed)
4879 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4880
4881 return err;
4882 }
4883
4884 #ifdef CONFIG_BT_LE
4885 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4886 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4887 {
4888 struct mgmt_rp_set_exp_feature rp;
4889 bool val, changed = false;
4890 int err;
4891
4892 /* This command must be used with the non-controller index */
4893 if (hdev)
4894 return mgmt_cmd_status(sk, hdev->id,
4895 MGMT_OP_SET_EXP_FEATURE,
4896 MGMT_STATUS_INVALID_INDEX);
4897
4898 /* Parameters are limited to a single octet */
4899 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4900 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4901 MGMT_OP_SET_EXP_FEATURE,
4902 MGMT_STATUS_INVALID_PARAMS);
4903
4904 /* Only boolean on/off is supported */
4905 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4906 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4907 MGMT_OP_SET_EXP_FEATURE,
4908 MGMT_STATUS_INVALID_PARAMS);
4909
4910 val = !!cp->param[0];
4911 if (val)
4912 err = iso_init();
4913 else
4914 err = iso_exit();
4915
4916 if (!err)
4917 changed = true;
4918
4919 memcpy(rp.uuid, iso_socket_uuid, 16);
4920 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4921
4922 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4923
4924 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4925 MGMT_OP_SET_EXP_FEATURE, 0,
4926 &rp, sizeof(rp));
4927
4928 if (changed)
4929 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4930
4931 return err;
4932 }
4933 #endif
4934
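/* Table-driven dispatch for MGMT_OP_SET_EXP_FEATURE: each experimental
 * feature is identified by its UUID and handled by a dedicated setter.
 */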
4935 static const struct mgmt_exp_feature {
4936 const u8 *uuid;
4937 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4938 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4939 } exp_features[] = {
4940 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4941 #ifdef CONFIG_BT_FEATURE_DEBUG
4942 EXP_FEAT(debug_uuid, set_debug_func),
4943 #endif
4944 EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
4945 EXP_FEAT(quality_report_uuid, set_quality_report_func),
4946 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
4947 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
4948 #ifdef CONFIG_BT_LE
4949 EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
4950 #endif
4951
4952 /* end with a null feature */
4953 EXP_FEAT(NULL, NULL)
4954 };
4955
4956 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4957 void *data, u16 data_len)
4958 {
4959 struct mgmt_cp_set_exp_feature *cp = data;
4960 size_t i = 0;
4961
4962 bt_dev_dbg(hdev, "sock %p", sk);
4963
4964 for (i = 0; exp_features[i].uuid; i++) {
4965 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4966 return exp_features[i].set_func(sk, hdev, cp, data_len);
4967 }
4968
4969 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4970 MGMT_OP_SET_EXP_FEATURE,
4971 MGMT_STATUS_NOT_SUPPORTED);
4972 }
4973
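/* Device flags are stored per address: in the accept list entry for
 * BR/EDR devices and in the connection parameters for LE devices.
 */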
4974 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4975 u16 data_len)
4976 {
4977 struct mgmt_cp_get_device_flags *cp = data;
4978 struct mgmt_rp_get_device_flags rp;
4979 struct bdaddr_list_with_flags *br_params;
4980 struct hci_conn_params *params;
4981 u32 supported_flags;
4982 u32 current_flags = 0;
4983 u8 status = MGMT_STATUS_INVALID_PARAMS;
4984
4985 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4986 &cp->addr.bdaddr, cp->addr.type);
4987
4988 hci_dev_lock(hdev);
4989
4990 supported_flags = hdev->conn_flags;
4991
4992 memset(&rp, 0, sizeof(rp));
4993
4994 if (cp->addr.type == BDADDR_BREDR) {
4995 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4996 &cp->addr.bdaddr,
4997 cp->addr.type);
4998 if (!br_params)
4999 goto done;
5000
5001 current_flags = br_params->flags;
5002 } else {
5003 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5004 le_addr_type(cp->addr.type));
5005 if (!params)
5006 goto done;
5007
5008 current_flags = params->flags;
5009 }
5010
5011 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5012 rp.addr.type = cp->addr.type;
5013 rp.supported_flags = cpu_to_le32(supported_flags);
5014 rp.current_flags = cpu_to_le32(current_flags);
5015
5016 status = MGMT_STATUS_SUCCESS;
5017
5018 done:
5019 hci_dev_unlock(hdev);
5020
5021 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5022 &rp, sizeof(rp));
5023 }
5024
5025 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5026 bdaddr_t *bdaddr, u8 bdaddr_type,
5027 u32 supported_flags, u32 current_flags)
5028 {
5029 struct mgmt_ev_device_flags_changed ev;
5030
5031 bacpy(&ev.addr.bdaddr, bdaddr);
5032 ev.addr.type = bdaddr_type;
5033 ev.supported_flags = cpu_to_le32(supported_flags);
5034 ev.current_flags = cpu_to_le32(current_flags);
5035
5036 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5037 }
5038
5039 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5040 u16 len)
5041 {
5042 struct mgmt_cp_set_device_flags *cp = data;
5043 struct bdaddr_list_with_flags *br_params;
5044 struct hci_conn_params *params;
5045 u8 status = MGMT_STATUS_INVALID_PARAMS;
5046 u32 supported_flags;
5047 u32 current_flags = __le32_to_cpu(cp->current_flags);
5048
5049 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5050 &cp->addr.bdaddr, cp->addr.type, current_flags);
5051
5052 /* hci_dev_lock() may need to be taken earlier, since conn_flags can change */
5053 supported_flags = hdev->conn_flags;
5054
5055 if ((supported_flags | current_flags) != supported_flags) {
5056 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5057 current_flags, supported_flags);
5058 goto done;
5059 }
5060
5061 hci_dev_lock(hdev);
5062
5063 if (cp->addr.type == BDADDR_BREDR) {
5064 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5065 &cp->addr.bdaddr,
5066 cp->addr.type);
5067
5068 if (br_params) {
5069 br_params->flags = current_flags;
5070 status = MGMT_STATUS_SUCCESS;
5071 } else {
5072 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5073 &cp->addr.bdaddr, cp->addr.type);
5074 }
5075
5076 goto unlock;
5077 }
5078
5079 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5080 le_addr_type(cp->addr.type));
5081 if (!params) {
5082 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5083 &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5084 goto unlock;
5085 }
5086
5087 supported_flags = hdev->conn_flags;
5088
5089 if ((supported_flags | current_flags) != supported_flags) {
5090 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5091 current_flags, supported_flags);
5092 goto unlock;
5093 }
5094
5095 WRITE_ONCE(params->flags, current_flags);
5096 status = MGMT_STATUS_SUCCESS;
5097
5098 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5099 * has been set.
5100 */
5101 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5102 hci_update_passive_scan(hdev);
5103
5104 unlock:
5105 hci_dev_unlock(hdev);
5106
5107 done:
5108 if (status == MGMT_STATUS_SUCCESS)
5109 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5110 supported_flags, current_flags);
5111
5112 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5113 &cp->addr, sizeof(cp->addr));
5114 }
5115
5116 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5117 u16 handle)
5118 {
5119 struct mgmt_ev_adv_monitor_added ev;
5120
5121 ev.monitor_handle = cpu_to_le16(handle);
5122
5123 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5124 }
5125
5126 static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
5127 __le16 handle)
5128 {
5129 struct mgmt_ev_adv_monitor_removed ev;
5130
5131 ev.monitor_handle = handle;
5132
5133 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
5134 }
5135
5136 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5137 void *data, u16 len)
5138 {
5139 struct adv_monitor *monitor = NULL;
5140 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5141 int handle, err;
5142 size_t rp_size = 0;
5143 __u32 supported = 0;
5144 __u32 enabled = 0;
5145 __u16 num_handles = 0;
5146 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5147
5148 BT_DBG("request for %s", hdev->name);
5149
5150 hci_dev_lock(hdev);
5151
5152 if (msft_monitor_supported(hdev))
5153 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5154
5155 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5156 handles[num_handles++] = monitor->handle;
5157
5158 hci_dev_unlock(hdev);
5159
5160 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5161 rp = kmalloc(rp_size, GFP_KERNEL);
5162 if (!rp)
5163 return -ENOMEM;
5164
5165 /* All supported features are currently enabled */
5166 enabled = supported;
5167
5168 rp->supported_features = cpu_to_le32(supported);
5169 rp->enabled_features = cpu_to_le32(enabled);
5170 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5171 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5172 rp->num_handles = cpu_to_le16(num_handles);
5173 if (num_handles)
5174 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5175
5176 err = mgmt_cmd_complete(sk, hdev->id,
5177 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5178 MGMT_STATUS_SUCCESS, rp, rp_size);
5179
5180 kfree(rp);
5181
5182 return err;
5183 }
5184
5185 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5186 void *data, int status)
5187 {
5188 struct mgmt_rp_add_adv_patterns_monitor rp;
5189 struct mgmt_pending_cmd *cmd = data;
5190 struct adv_monitor *monitor = cmd->user_data;
5191
5192 hci_dev_lock(hdev);
5193
5194 rp.monitor_handle = cpu_to_le16(monitor->handle);
5195
5196 if (!status) {
5197 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5198 hdev->adv_monitors_cnt++;
5199 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5200 monitor->state = ADV_MONITOR_STATE_REGISTERED;
5201 hci_update_passive_scan(hdev);
5202 }
5203
5204 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
5205 mgmt_status(status), &rp, sizeof(rp));
5206 mgmt_pending_remove(cmd);
5207
5208 hci_dev_unlock(hdev);
5209 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5210 rp.monitor_handle, status);
5211 }
5212
5213 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5214 {
5215 struct mgmt_pending_cmd *cmd = data;
5216 struct adv_monitor *monitor = cmd->user_data;
5217
5218 return hci_add_adv_monitor(hdev, monitor);
5219 }
5220
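/* Common tail of the Add Advertisement Patterns Monitor commands. On
 * failure the monitor is freed here, so callers must not touch 'm'
 * after this returns; on success its ownership moves to the pending
 * command.
 */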
5221 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5222 struct adv_monitor *m, u8 status,
5223 void *data, u16 len, u16 op)
5224 {
5225 struct mgmt_pending_cmd *cmd;
5226 int err;
5227
5228 hci_dev_lock(hdev);
5229
5230 if (status)
5231 goto unlock;
5232
5233 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5234 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5235 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5236 status = MGMT_STATUS_BUSY;
5237 goto unlock;
5238 }
5239
5240 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5241 if (!cmd) {
5242 status = MGMT_STATUS_NO_RESOURCES;
5243 goto unlock;
5244 }
5245
5246 cmd->user_data = m;
5247 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5248 mgmt_add_adv_patterns_monitor_complete);
5249 if (err) {
5250 if (err == -ENOMEM)
5251 status = MGMT_STATUS_NO_RESOURCES;
5252 else
5253 status = MGMT_STATUS_FAILED;
5254
5255 goto unlock;
5256 }
5257
5258 hci_dev_unlock(hdev);
5259
5260 return 0;
5261
5262 unlock:
5263 hci_free_adv_monitor(hdev, m);
5264 hci_dev_unlock(hdev);
5265 return mgmt_cmd_status(sk, hdev->id, op, status);
5266 }
5267
5268 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5269 struct mgmt_adv_rssi_thresholds *rssi)
5270 {
5271 if (rssi) {
5272 m->rssi.low_threshold = rssi->low_threshold;
5273 m->rssi.low_threshold_timeout =
5274 __le16_to_cpu(rssi->low_threshold_timeout);
5275 m->rssi.high_threshold = rssi->high_threshold;
5276 m->rssi.high_threshold_timeout =
5277 __le16_to_cpu(rssi->high_threshold_timeout);
5278 m->rssi.sampling_period = rssi->sampling_period;
5279 } else {
5280 /* Default values. These numbers are the least constraining
5281 * parameters for the MSFT API to work, so it behaves as if there
5282 * were no RSSI parameters to consider. May need to be changed
5283 * if other APIs are to be supported.
5284 */
5285 m->rssi.low_threshold = -127;
5286 m->rssi.low_threshold_timeout = 60;
5287 m->rssi.high_threshold = -127;
5288 m->rssi.high_threshold_timeout = 0;
5289 m->rssi.sampling_period = 0;
5290 }
5291 }
5292
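/* Copy the monitor patterns from the command, validating that each
 * pattern lies entirely within the maximum extended advertising data
 * length.
 */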
5293 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5294 struct mgmt_adv_pattern *patterns)
5295 {
5296 u8 offset = 0, length = 0;
5297 struct adv_pattern *p = NULL;
5298 int i;
5299
5300 for (i = 0; i < pattern_count; i++) {
5301 offset = patterns[i].offset;
5302 length = patterns[i].length;
5303 if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5304 length > HCI_MAX_EXT_AD_LENGTH ||
5305 (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5306 return MGMT_STATUS_INVALID_PARAMS;
5307
5308 p = kmalloc(sizeof(*p), GFP_KERNEL);
5309 if (!p)
5310 return MGMT_STATUS_NO_RESOURCES;
5311
5312 p->ad_type = patterns[i].ad_type;
5313 p->offset = patterns[i].offset;
5314 p->length = patterns[i].length;
5315 memcpy(p->value, patterns[i].value, p->length);
5316
5317 INIT_LIST_HEAD(&p->list);
5318 list_add(&p->list, &m->patterns);
5319 }
5320
5321 return MGMT_STATUS_SUCCESS;
5322 }
5323
5324 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5325 void *data, u16 len)
5326 {
5327 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5328 struct adv_monitor *m = NULL;
5329 u8 status = MGMT_STATUS_SUCCESS;
5330 size_t expected_size = sizeof(*cp);
5331
5332 BT_DBG("request for %s", hdev->name);
5333
5334 if (len <= sizeof(*cp)) {
5335 status = MGMT_STATUS_INVALID_PARAMS;
5336 goto done;
5337 }
5338
5339 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5340 if (len != expected_size) {
5341 status = MGMT_STATUS_INVALID_PARAMS;
5342 goto done;
5343 }
5344
5345 m = kzalloc(sizeof(*m), GFP_KERNEL);
5346 if (!m) {
5347 status = MGMT_STATUS_NO_RESOURCES;
5348 goto done;
5349 }
5350
5351 INIT_LIST_HEAD(&m->patterns);
5352
5353 parse_adv_monitor_rssi(m, NULL);
5354 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5355
5356 done:
5357 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5358 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5359 }
5360
5361 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5362 void *data, u16 len)
5363 {
5364 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5365 struct adv_monitor *m = NULL;
5366 u8 status = MGMT_STATUS_SUCCESS;
5367 size_t expected_size = sizeof(*cp);
5368
5369 BT_DBG("request for %s", hdev->name);
5370
5371 if (len <= sizeof(*cp)) {
5372 status = MGMT_STATUS_INVALID_PARAMS;
5373 goto done;
5374 }
5375
5376 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5377 if (len != expected_size) {
5378 status = MGMT_STATUS_INVALID_PARAMS;
5379 goto done;
5380 }
5381
5382 m = kzalloc(sizeof(*m), GFP_KERNEL);
5383 if (!m) {
5384 status = MGMT_STATUS_NO_RESOURCES;
5385 goto done;
5386 }
5387
5388 INIT_LIST_HEAD(&m->patterns);
5389
5390 parse_adv_monitor_rssi(m, &cp->rssi);
5391 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5392
5393 done:
5394 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5395 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5396 }
5397
5398 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5399 void *data, int status)
5400 {
5401 struct mgmt_rp_remove_adv_monitor rp;
5402 struct mgmt_pending_cmd *cmd = data;
5403 struct mgmt_cp_remove_adv_monitor *cp;
5404
5405 if (status == -ECANCELED)
5406 return;
5407
5408 hci_dev_lock(hdev);
5409
5410 cp = cmd->param;
5411
5412 rp.monitor_handle = cp->monitor_handle;
5413
5414 if (!status) {
5415 mgmt_adv_monitor_removed(cmd->sk, hdev, cp->monitor_handle);
5416 hci_update_passive_scan(hdev);
5417 }
5418
5419 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
5420 mgmt_status(status), &rp, sizeof(rp));
5421 mgmt_pending_free(cmd);
5422
5423 hci_dev_unlock(hdev);
5424 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5425 rp.monitor_handle, status);
5426 }
5427
5428 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5429 {
5430 struct mgmt_pending_cmd *cmd = data;
5431 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5432 u16 handle = __le16_to_cpu(cp->monitor_handle);
5433
5434 if (!handle)
5435 return hci_remove_all_adv_monitor(hdev);
5436
5437 return hci_remove_single_adv_monitor(hdev, handle);
5438 }
5439
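/* A monitor handle of 0 acts as a wildcard and removes all registered
 * monitors; any other value removes only the matching monitor.
 */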
5440 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5441 void *data, u16 len)
5442 {
5443 struct mgmt_pending_cmd *cmd;
5444 int err, status;
5445
5446 hci_dev_lock(hdev);
5447
5448 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5449 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5450 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5451 status = MGMT_STATUS_BUSY;
5452 goto unlock;
5453 }
5454
5455 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5456 if (!cmd) {
5457 status = MGMT_STATUS_NO_RESOURCES;
5458 goto unlock;
5459 }
5460
5461 err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
5462 mgmt_remove_adv_monitor_complete);
5463
5464 if (err) {
5465 mgmt_pending_free(cmd);
5466
5467 if (err == -ENOMEM)
5468 status = MGMT_STATUS_NO_RESOURCES;
5469 else
5470 status = MGMT_STATUS_FAILED;
5471
5472 goto unlock;
5473 }
5474
5475 hci_dev_unlock(hdev);
5476
5477 return 0;
5478
5479 unlock:
5480 hci_dev_unlock(hdev);
5481 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5482 status);
5483 }
5484
5485 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5486 {
5487 struct mgmt_rp_read_local_oob_data mgmt_rp;
5488 size_t rp_size = sizeof(mgmt_rp);
5489 struct mgmt_pending_cmd *cmd = data;
5490 struct sk_buff *skb = cmd->skb;
5491 u8 status = mgmt_status(err);
5492
5493 if (!status) {
5494 if (!skb)
5495 status = MGMT_STATUS_FAILED;
5496 else if (IS_ERR(skb))
5497 status = mgmt_status(PTR_ERR(skb));
5498 else
5499 status = mgmt_status(skb->data[0]);
5500 }
5501
5502 bt_dev_dbg(hdev, "status %d", status);
5503
5504 if (status) {
5505 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5506 goto remove;
5507 }
5508
5509 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5510
5511 if (!bredr_sc_enabled(hdev)) {
5512 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5513
5514 if (skb->len < sizeof(*rp)) {
5515 mgmt_cmd_status(cmd->sk, hdev->id,
5516 MGMT_OP_READ_LOCAL_OOB_DATA,
5517 MGMT_STATUS_FAILED);
5518 goto remove;
5519 }
5520
5521 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5522 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5523
5524 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5525 } else {
5526 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5527
5528 if (skb->len < sizeof(*rp)) {
5529 mgmt_cmd_status(cmd->sk, hdev->id,
5530 MGMT_OP_READ_LOCAL_OOB_DATA,
5531 MGMT_STATUS_FAILED);
5532 goto remove;
5533 }
5534
5535 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5536 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5537
5538 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5539 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5540 }
5541
5542 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5543 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5544
5545 remove:
5546 if (skb && !IS_ERR(skb))
5547 kfree_skb(skb);
5548
5549 mgmt_pending_free(cmd);
5550 }
5551
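/* Read the local OOB data from the controller; the extended variant
 * returns both the P-192 and P-256 values when Secure Connections is
 * enabled.
 */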
5552 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5553 {
5554 struct mgmt_pending_cmd *cmd = data;
5555
5556 if (bredr_sc_enabled(hdev))
5557 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5558 else
5559 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5560
5561 if (IS_ERR(cmd->skb))
5562 return PTR_ERR(cmd->skb);
5563 else
5564 return 0;
5565 }
5566
5567 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5568 void *data, u16 data_len)
5569 {
5570 struct mgmt_pending_cmd *cmd;
5571 int err;
5572
5573 bt_dev_dbg(hdev, "sock %p", sk);
5574
5575 hci_dev_lock(hdev);
5576
5577 if (!hdev_is_powered(hdev)) {
5578 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5579 MGMT_STATUS_NOT_POWERED);
5580 goto unlock;
5581 }
5582
5583 if (!lmp_ssp_capable(hdev)) {
5584 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5585 MGMT_STATUS_NOT_SUPPORTED);
5586 goto unlock;
5587 }
5588
5589 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5590 if (!cmd)
5591 err = -ENOMEM;
5592 else
5593 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5594 read_local_oob_data_complete);
5595
5596 if (err < 0) {
5597 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5598 MGMT_STATUS_FAILED);
5599
5600 if (cmd)
5601 mgmt_pending_free(cmd);
5602 }
5603
5604 unlock:
5605 hci_dev_unlock(hdev);
5606 return err;
5607 }
5608
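/* Two command layouts are accepted: the legacy size carrying only the
 * P-192 hash and randomizer, and the extended size that additionally
 * carries the P-256 values.
 */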
5609 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5610 void *data, u16 len)
5611 {
5612 struct mgmt_addr_info *addr = data;
5613 int err;
5614
5615 bt_dev_dbg(hdev, "sock %p", sk);
5616
5617 if (!bdaddr_type_is_valid(addr->type))
5618 return mgmt_cmd_complete(sk, hdev->id,
5619 MGMT_OP_ADD_REMOTE_OOB_DATA,
5620 MGMT_STATUS_INVALID_PARAMS,
5621 addr, sizeof(*addr));
5622
5623 hci_dev_lock(hdev);
5624
5625 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5626 struct mgmt_cp_add_remote_oob_data *cp = data;
5627 u8 status;
5628
5629 if (cp->addr.type != BDADDR_BREDR) {
5630 err = mgmt_cmd_complete(sk, hdev->id,
5631 MGMT_OP_ADD_REMOTE_OOB_DATA,
5632 MGMT_STATUS_INVALID_PARAMS,
5633 &cp->addr, sizeof(cp->addr));
5634 goto unlock;
5635 }
5636
5637 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5638 cp->addr.type, cp->hash,
5639 cp->rand, NULL, NULL);
5640 if (err < 0)
5641 status = MGMT_STATUS_FAILED;
5642 else
5643 status = MGMT_STATUS_SUCCESS;
5644
5645 err = mgmt_cmd_complete(sk, hdev->id,
5646 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5647 &cp->addr, sizeof(cp->addr));
5648 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5649 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5650 u8 *rand192, *hash192, *rand256, *hash256;
5651 u8 status;
5652
5653 if (bdaddr_type_is_le(cp->addr.type)) {
5654 /* Enforce zero-valued 192-bit parameters as
5655 * long as legacy SMP OOB isn't implemented.
5656 */
5657 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5658 memcmp(cp->hash192, ZERO_KEY, 16)) {
5659 err = mgmt_cmd_complete(sk, hdev->id,
5660 MGMT_OP_ADD_REMOTE_OOB_DATA,
5661 MGMT_STATUS_INVALID_PARAMS,
5662 addr, sizeof(*addr));
5663 goto unlock;
5664 }
5665
5666 rand192 = NULL;
5667 hash192 = NULL;
5668 } else {
5669 /* In case one of the P-192 values is set to zero,
5670 * then just disable OOB data for P-192.
5671 */
5672 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5673 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5674 rand192 = NULL;
5675 hash192 = NULL;
5676 } else {
5677 rand192 = cp->rand192;
5678 hash192 = cp->hash192;
5679 }
5680 }
5681
5682 /* In case one of the P-256 values is set to zero, then just
5683 * disable OOB data for P-256.
5684 */
5685 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5686 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5687 rand256 = NULL;
5688 hash256 = NULL;
5689 } else {
5690 rand256 = cp->rand256;
5691 hash256 = cp->hash256;
5692 }
5693
5694 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5695 cp->addr.type, hash192, rand192,
5696 hash256, rand256);
5697 if (err < 0)
5698 status = MGMT_STATUS_FAILED;
5699 else
5700 status = MGMT_STATUS_SUCCESS;
5701
5702 err = mgmt_cmd_complete(sk, hdev->id,
5703 MGMT_OP_ADD_REMOTE_OOB_DATA,
5704 status, &cp->addr, sizeof(cp->addr));
5705 } else {
5706 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5707 len);
5708 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5709 MGMT_STATUS_INVALID_PARAMS);
5710 }
5711
5712 unlock:
5713 hci_dev_unlock(hdev);
5714 return err;
5715 }
5716
5717 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5718 void *data, u16 len)
5719 {
5720 struct mgmt_cp_remove_remote_oob_data *cp = data;
5721 u8 status;
5722 int err;
5723
5724 bt_dev_dbg(hdev, "sock %p", sk);
5725
5726 if (cp->addr.type != BDADDR_BREDR)
5727 return mgmt_cmd_complete(sk, hdev->id,
5728 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5729 MGMT_STATUS_INVALID_PARAMS,
5730 &cp->addr, sizeof(cp->addr));
5731
5732 hci_dev_lock(hdev);
5733
5734 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5735 hci_remote_oob_data_clear(hdev);
5736 status = MGMT_STATUS_SUCCESS;
5737 goto done;
5738 }
5739
5740 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5741 if (err < 0)
5742 status = MGMT_STATUS_INVALID_PARAMS;
5743 else
5744 status = MGMT_STATUS_SUCCESS;
5745
5746 done:
5747 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5748 status, &cp->addr, sizeof(cp->addr));
5749
5750 hci_dev_unlock(hdev);
5751 return err;
5752 }
5753
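/* Check that the requested discovery type is supported by the
 * controller. On failure, *mgmt_status is set to the status code that
 * should be returned to user space.
 */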
5754 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5755 uint8_t *mgmt_status)
5756 {
5757 switch (type) {
5758 case DISCOV_TYPE_LE:
5759 *mgmt_status = mgmt_le_support(hdev);
5760 if (*mgmt_status)
5761 return false;
5762 break;
5763 case DISCOV_TYPE_INTERLEAVED:
5764 *mgmt_status = mgmt_le_support(hdev);
5765 if (*mgmt_status)
5766 return false;
5767 fallthrough;
5768 case DISCOV_TYPE_BREDR:
5769 *mgmt_status = mgmt_bredr_support(hdev);
5770 if (*mgmt_status)
5771 return false;
5772 break;
5773 default:
5774 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5775 return false;
5776 }
5777
5778 return true;
5779 }
5780
5781 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5782 {
5783 struct mgmt_pending_cmd *cmd = data;
5784
5785 bt_dev_dbg(hdev, "err %d", err);
5786
5787 if (err == -ECANCELED)
5788 return;
5789
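/* Only respond if this is still the pending discovery command; it may
 * already have been taken care of elsewhere (e.g. if the command was
 * canceled).
 */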
5790 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5791 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5792 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5793 return;
5794
5795 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
5796 cmd->param, 1);
5797 mgmt_pending_remove(cmd);
5798
5799 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
5800 DISCOVERY_FINDING);
5801 }
5802
5803 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5804 {
5805 return hci_start_discovery_sync(hdev);
5806 }
5807
5808 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5809 u16 op, void *data, u16 len)
5810 {
5811 struct mgmt_cp_start_discovery *cp = data;
5812 struct mgmt_pending_cmd *cmd;
5813 u8 status;
5814 int err;
5815
5816 bt_dev_dbg(hdev, "sock %p", sk);
5817
5818 hci_dev_lock(hdev);
5819
5820 if (!hdev_is_powered(hdev)) {
5821 err = mgmt_cmd_complete(sk, hdev->id, op,
5822 MGMT_STATUS_NOT_POWERED,
5823 &cp->type, sizeof(cp->type));
5824 goto failed;
5825 }
5826
5827 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5828 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5829 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5830 &cp->type, sizeof(cp->type));
5831 goto failed;
5832 }
5833
5834 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5835 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5836 &cp->type, sizeof(cp->type));
5837 goto failed;
5838 }
5839
5840 /* Can't start discovery when it is paused */
5841 if (hdev->discovery_paused) {
5842 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5843 &cp->type, sizeof(cp->type));
5844 goto failed;
5845 }
5846
5847 /* Clear the discovery filter first to free any previously
5848 * allocated memory for the UUID list.
5849 */
5850 hci_discovery_filter_clear(hdev);
5851
5852 hdev->discovery.type = cp->type;
5853 hdev->discovery.report_invalid_rssi = false;
5854 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5855 hdev->discovery.limited = true;
5856 else
5857 hdev->discovery.limited = false;
5858
5859 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5860 if (!cmd) {
5861 err = -ENOMEM;
5862 goto failed;
5863 }
5864
5865 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5866 start_discovery_complete);
5867 if (err < 0) {
5868 mgmt_pending_remove(cmd);
5869 goto failed;
5870 }
5871
5872 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5873
5874 failed:
5875 hci_dev_unlock(hdev);
5876 return err;
5877 }
5878
5879 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5880 void *data, u16 len)
5881 {
5882 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5883 data, len);
5884 }
5885
5886 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5887 void *data, u16 len)
5888 {
5889 return start_discovery_internal(sk, hdev,
5890 MGMT_OP_START_LIMITED_DISCOVERY,
5891 data, len);
5892 }
5893
5894 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5895 void *data, u16 len)
5896 {
5897 struct mgmt_cp_start_service_discovery *cp = data;
5898 struct mgmt_pending_cmd *cmd;
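/* Each filter UUID occupies 16 bytes; bounding uuid_count keeps the
 * expected_len calculation below from overflowing its u16 type.
 */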
5899 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5900 u16 uuid_count, expected_len;
5901 u8 status;
5902 int err;
5903
5904 bt_dev_dbg(hdev, "sock %p", sk);
5905
5906 hci_dev_lock(hdev);
5907
5908 if (!hdev_is_powered(hdev)) {
5909 err = mgmt_cmd_complete(sk, hdev->id,
5910 MGMT_OP_START_SERVICE_DISCOVERY,
5911 MGMT_STATUS_NOT_POWERED,
5912 &cp->type, sizeof(cp->type));
5913 goto failed;
5914 }
5915
5916 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5917 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5918 err = mgmt_cmd_complete(sk, hdev->id,
5919 MGMT_OP_START_SERVICE_DISCOVERY,
5920 MGMT_STATUS_BUSY, &cp->type,
5921 sizeof(cp->type));
5922 goto failed;
5923 }
5924
5925 if (hdev->discovery_paused) {
5926 err = mgmt_cmd_complete(sk, hdev->id,
5927 MGMT_OP_START_SERVICE_DISCOVERY,
5928 MGMT_STATUS_BUSY, &cp->type,
5929 sizeof(cp->type));
5930 goto failed;
5931 }
5932
5933 uuid_count = __le16_to_cpu(cp->uuid_count);
5934 if (uuid_count > max_uuid_count) {
5935 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5936 uuid_count);
5937 err = mgmt_cmd_complete(sk, hdev->id,
5938 MGMT_OP_START_SERVICE_DISCOVERY,
5939 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5940 sizeof(cp->type));
5941 goto failed;
5942 }
5943
5944 expected_len = sizeof(*cp) + uuid_count * 16;
5945 if (expected_len != len) {
5946 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5947 expected_len, len);
5948 err = mgmt_cmd_complete(sk, hdev->id,
5949 MGMT_OP_START_SERVICE_DISCOVERY,
5950 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5951 sizeof(cp->type));
5952 goto failed;
5953 }
5954
5955 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5956 err = mgmt_cmd_complete(sk, hdev->id,
5957 MGMT_OP_START_SERVICE_DISCOVERY,
5958 status, &cp->type, sizeof(cp->type));
5959 goto failed;
5960 }
5961
5962 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
5963 hdev, data, len);
5964 if (!cmd) {
5965 err = -ENOMEM;
5966 goto failed;
5967 }
5968
5969 /* Clear the discovery filter first to free any previously
5970 * allocated memory for the UUID list.
5971 */
5972 hci_discovery_filter_clear(hdev);
5973
5974 hdev->discovery.result_filtering = true;
5975 hdev->discovery.type = cp->type;
5976 hdev->discovery.rssi = cp->rssi;
5977 hdev->discovery.uuid_count = uuid_count;
5978
5979 if (uuid_count > 0) {
5980 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
5981 GFP_KERNEL);
5982 if (!hdev->discovery.uuids) {
5983 err = mgmt_cmd_complete(sk, hdev->id,
5984 MGMT_OP_START_SERVICE_DISCOVERY,
5985 MGMT_STATUS_FAILED,
5986 &cp->type, sizeof(cp->type));
5987 mgmt_pending_remove(cmd);
5988 goto failed;
5989 }
5990 }
5991
5992 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5993 start_discovery_complete);
5994 if (err < 0) {
5995 mgmt_pending_remove(cmd);
5996 goto failed;
5997 }
5998
5999 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6000
6001 failed:
6002 hci_dev_unlock(hdev);
6003 return err;
6004 }
6005
6006 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6007 {
6008 struct mgmt_pending_cmd *cmd = data;
6009
6010 if (err == -ECANCELED ||
6011 cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6012 return;
6013
6014 bt_dev_dbg(hdev, "err %d", err);
6015
6016 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
6017 cmd->param, 1);
6018 mgmt_pending_remove(cmd);
6019
6020 if (!err)
6021 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6022 }
6023
6024 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6025 {
6026 return hci_stop_discovery_sync(hdev);
6027 }
6028
6029 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6030 u16 len)
6031 {
6032 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6033 struct mgmt_pending_cmd *cmd;
6034 int err;
6035
6036 bt_dev_dbg(hdev, "sock %p", sk);
6037
6038 hci_dev_lock(hdev);
6039
6040 if (!hci_discovery_active(hdev)) {
6041 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6042 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6043 sizeof(mgmt_cp->type));
6044 goto unlock;
6045 }
6046
6047 if (hdev->discovery.type != mgmt_cp->type) {
6048 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6049 MGMT_STATUS_INVALID_PARAMS,
6050 &mgmt_cp->type, sizeof(mgmt_cp->type));
6051 goto unlock;
6052 }
6053
6054 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6055 if (!cmd) {
6056 err = -ENOMEM;
6057 goto unlock;
6058 }
6059
6060 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6061 stop_discovery_complete);
6062 if (err < 0) {
6063 mgmt_pending_remove(cmd);
6064 goto unlock;
6065 }
6066
6067 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6068
6069 unlock:
6070 hci_dev_unlock(hdev);
6071 return err;
6072 }
6073
6074 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6075 u16 len)
6076 {
6077 struct mgmt_cp_confirm_name *cp = data;
6078 struct inquiry_entry *e;
6079 int err;
6080
6081 bt_dev_dbg(hdev, "sock %p", sk);
6082
6083 hci_dev_lock(hdev);
6084
6085 if (!hci_discovery_active(hdev)) {
6086 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6087 MGMT_STATUS_FAILED, &cp->addr,
6088 sizeof(cp->addr));
6089 goto failed;
6090 }
6091
6092 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6093 if (!e) {
6094 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6095 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6096 sizeof(cp->addr));
6097 goto failed;
6098 }
6099
6100 if (cp->name_known) {
6101 e->name_state = NAME_KNOWN;
6102 list_del(&e->list);
6103 } else {
6104 e->name_state = NAME_NEEDED;
6105 hci_inquiry_cache_update_resolve(hdev, e);
6106 }
6107
6108 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6109 &cp->addr, sizeof(cp->addr));
6110
6111 failed:
6112 hci_dev_unlock(hdev);
6113 return err;
6114 }
6115
6116 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6117 u16 len)
6118 {
6119 struct mgmt_cp_block_device *cp = data;
6120 u8 status;
6121 int err;
6122
6123 bt_dev_dbg(hdev, "sock %p", sk);
6124
6125 if (!bdaddr_type_is_valid(cp->addr.type))
6126 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6127 MGMT_STATUS_INVALID_PARAMS,
6128 &cp->addr, sizeof(cp->addr));
6129
6130 hci_dev_lock(hdev);
6131
6132 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6133 cp->addr.type);
6134 if (err < 0) {
6135 status = MGMT_STATUS_FAILED;
6136 goto done;
6137 }
6138
6139 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6140 sk);
6141 status = MGMT_STATUS_SUCCESS;
6142
6143 done:
6144 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6145 &cp->addr, sizeof(cp->addr));
6146
6147 hci_dev_unlock(hdev);
6148
6149 return err;
6150 }
6151
6152 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6153 u16 len)
6154 {
6155 struct mgmt_cp_unblock_device *cp = data;
6156 u8 status;
6157 int err;
6158
6159 bt_dev_dbg(hdev, "sock %p", sk);
6160
6161 if (!bdaddr_type_is_valid(cp->addr.type))
6162 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6163 MGMT_STATUS_INVALID_PARAMS,
6164 &cp->addr, sizeof(cp->addr));
6165
6166 hci_dev_lock(hdev);
6167
6168 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6169 cp->addr.type);
6170 if (err < 0) {
6171 status = MGMT_STATUS_INVALID_PARAMS;
6172 goto done;
6173 }
6174
6175 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6176 sk);
6177 status = MGMT_STATUS_SUCCESS;
6178
6179 done:
6180 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6181 &cp->addr, sizeof(cp->addr));
6182
6183 hci_dev_unlock(hdev);
6184
6185 return err;
6186 }
6187
6188 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6189 {
6190 return hci_update_eir_sync(hdev);
6191 }
6192
6193 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6194 u16 len)
6195 {
6196 struct mgmt_cp_set_device_id *cp = data;
6197 int err;
6198 __u16 source;
6199
6200 bt_dev_dbg(hdev, "sock %p", sk);
6201
6202 source = __le16_to_cpu(cp->source);
6203
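/* Valid Device ID sources: 0x0000 (disabled), 0x0001 (Bluetooth SIG)
 * and 0x0002 (USB Implementer's Forum).
 */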
6204 if (source > 0x0002)
6205 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6206 MGMT_STATUS_INVALID_PARAMS);
6207
6208 hci_dev_lock(hdev);
6209
6210 hdev->devid_source = source;
6211 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6212 hdev->devid_product = __le16_to_cpu(cp->product);
6213 hdev->devid_version = __le16_to_cpu(cp->version);
6214
6215 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6216 NULL, 0);
6217
6218 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6219
6220 hci_dev_unlock(hdev);
6221
6222 return err;
6223 }
6224
6225 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6226 {
6227 if (err)
6228 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6229 else
6230 bt_dev_dbg(hdev, "status %d", err);
6231 }
6232
6233 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6234 {
6235 struct cmd_lookup match = { NULL, hdev };
6236 u8 instance;
6237 struct adv_info *adv_instance;
6238 u8 status = mgmt_status(err);
6239
6240 if (status) {
6241 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, true,
6242 cmd_status_rsp, &status);
6243 return;
6244 }
6245
6246 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6247 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6248 else
6249 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6250
6251 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, true, settings_rsp,
6252 &match);
6253
6254 new_settings(hdev, match.sk);
6255
6256 if (match.sk)
6257 sock_put(match.sk);
6258
6259 /* If "Set Advertising" was just disabled and instance advertising was
6260 * set up earlier, then re-enable multi-instance advertising.
6261 */
6262 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6263 list_empty(&hdev->adv_instances))
6264 return;
6265
6266 instance = hdev->cur_adv_instance;
6267 if (!instance) {
6268 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6269 struct adv_info, list);
6270 if (!adv_instance)
6271 return;
6272
6273 instance = adv_instance->instance;
6274 }
6275
6276 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6277
6278 enable_advertising_instance(hdev, err);
6279 }
6280
6281 static int set_adv_sync(struct hci_dev *hdev, void *data)
6282 {
6283 struct mgmt_pending_cmd *cmd = data;
6284 struct mgmt_mode *cp = cmd->param;
6285 u8 val = !!cp->val;
6286
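/* A value of 0x02 requests connectable advertising, 0x01 plain
 * advertising.
 */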
6287 if (cp->val == 0x02)
6288 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6289 else
6290 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6291
6292 cancel_adv_timeout(hdev);
6293
6294 if (val) {
6295 /* Switch to instance "0" for the Set Advertising setting.
6296 * We cannot use update_[adv|scan_rsp]_data() here as the
6297 * HCI_ADVERTISING flag is not yet set.
6298 */
6299 hdev->cur_adv_instance = 0x00;
6300
6301 if (ext_adv_capable(hdev)) {
6302 hci_start_ext_adv_sync(hdev, 0x00);
6303 } else {
6304 hci_update_adv_data_sync(hdev, 0x00);
6305 hci_update_scan_rsp_data_sync(hdev, 0x00);
6306 hci_enable_advertising_sync(hdev);
6307 }
6308 } else {
6309 hci_disable_advertising_sync(hdev);
6310 }
6311
6312 return 0;
6313 }
6314
6315 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6316 u16 len)
6317 {
6318 struct mgmt_mode *cp = data;
6319 struct mgmt_pending_cmd *cmd;
6320 u8 val, status;
6321 int err;
6322
6323 bt_dev_dbg(hdev, "sock %p", sk);
6324
6325 status = mgmt_le_support(hdev);
6326 if (status)
6327 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6328 status);
6329
6330 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6331 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6332 MGMT_STATUS_INVALID_PARAMS);
6333
6334 if (hdev->advertising_paused)
6335 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6336 MGMT_STATUS_BUSY);
6337
6338 hci_dev_lock(hdev);
6339
6340 val = !!cp->val;
6341
6342 /* The following conditions mean that we should not do any
6343  * HCI communication but instead directly send a mgmt
6344  * response to user space (after toggling the flag if
6345  * necessary).
6346  */
6347 if (!hdev_is_powered(hdev) ||
6348 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6349 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6350 hci_dev_test_flag(hdev, HCI_MESH) ||
6351 hci_conn_num(hdev, LE_LINK) > 0 ||
6352 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6353 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6354 bool changed;
6355
6356 if (cp->val) {
6357 hdev->cur_adv_instance = 0x00;
6358 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6359 if (cp->val == 0x02)
6360 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6361 else
6362 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6363 } else {
6364 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6365 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6366 }
6367
6368 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6369 if (err < 0)
6370 goto unlock;
6371
6372 if (changed)
6373 err = new_settings(hdev, sk);
6374
6375 goto unlock;
6376 }
6377
6378 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6379 pending_find(MGMT_OP_SET_LE, hdev)) {
6380 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6381 MGMT_STATUS_BUSY);
6382 goto unlock;
6383 }
6384
6385 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6386 if (!cmd)
6387 err = -ENOMEM;
6388 else
6389 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6390 set_advertising_complete);
6391
6392 if (err < 0 && cmd)
6393 mgmt_pending_remove(cmd);
6394
6395 unlock:
6396 hci_dev_unlock(hdev);
6397 return err;
6398 }
6399
6400 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6401 void *data, u16 len)
6402 {
6403 struct mgmt_cp_set_static_address *cp = data;
6404 int err;
6405
6406 bt_dev_dbg(hdev, "sock %p", sk);
6407
6408 if (!lmp_le_capable(hdev))
6409 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6410 MGMT_STATUS_NOT_SUPPORTED);
6411
6412 if (hdev_is_powered(hdev))
6413 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6414 MGMT_STATUS_REJECTED);
6415
6416 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6417 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6418 return mgmt_cmd_status(sk, hdev->id,
6419 MGMT_OP_SET_STATIC_ADDRESS,
6420 MGMT_STATUS_INVALID_PARAMS);
6421
6422 /* Two most significant bits shall be set */
6423 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6424 return mgmt_cmd_status(sk, hdev->id,
6425 MGMT_OP_SET_STATIC_ADDRESS,
6426 MGMT_STATUS_INVALID_PARAMS);
6427 }
6428
6429 hci_dev_lock(hdev);
6430
6431 bacpy(&hdev->static_addr, &cp->bdaddr);
6432
6433 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6434 if (err < 0)
6435 goto unlock;
6436
6437 err = new_settings(hdev, sk);
6438
6439 unlock:
6440 hci_dev_unlock(hdev);
6441 return err;
6442 }
6443
6444 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6445 void *data, u16 len)
6446 {
6447 struct mgmt_cp_set_scan_params *cp = data;
6448 __u16 interval, window;
6449 int err;
6450
6451 bt_dev_dbg(hdev, "sock %p", sk);
6452
6453 if (!lmp_le_capable(hdev))
6454 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6455 MGMT_STATUS_NOT_SUPPORTED);
6456
6457 /* Keep allowed ranges in sync with set_mesh() */
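/* Interval and window are in units of 0.625 ms, so the range
 * 0x0004-0x4000 corresponds to 2.5 ms - 10.24 s.
 */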
6458 interval = __le16_to_cpu(cp->interval);
6459
6460 if (interval < 0x0004 || interval > 0x4000)
6461 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6462 MGMT_STATUS_INVALID_PARAMS);
6463
6464 window = __le16_to_cpu(cp->window);
6465
6466 if (window < 0x0004 || window > 0x4000)
6467 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6468 MGMT_STATUS_INVALID_PARAMS);
6469
6470 if (window > interval)
6471 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6472 MGMT_STATUS_INVALID_PARAMS);
6473
6474 hci_dev_lock(hdev);
6475
6476 hdev->le_scan_interval = interval;
6477 hdev->le_scan_window = window;
6478
6479 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6480 NULL, 0);
6481
6482 /* If background scan is running, restart it so new parameters are
6483 * loaded.
6484 */
6485 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6486 hdev->discovery.state == DISCOVERY_STOPPED)
6487 hci_update_passive_scan(hdev);
6488
6489 hci_dev_unlock(hdev);
6490
6491 return err;
6492 }
6493
6494 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6495 {
6496 struct mgmt_pending_cmd *cmd = data;
6497
6498 bt_dev_dbg(hdev, "err %d", err);
6499
6500 if (err) {
6501 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6502 mgmt_status(err));
6503 } else {
6504 struct mgmt_mode *cp = cmd->param;
6505
6506 if (cp->val)
6507 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6508 else
6509 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6510
6511 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6512 new_settings(hdev, cmd->sk);
6513 }
6514
6515 mgmt_pending_free(cmd);
6516 }
6517
6518 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6519 {
6520 struct mgmt_pending_cmd *cmd = data;
6521 struct mgmt_mode *cp = cmd->param;
6522
6523 return hci_write_fast_connectable_sync(hdev, cp->val);
6524 }
6525
6526 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6527 void *data, u16 len)
6528 {
6529 struct mgmt_mode *cp = data;
6530 struct mgmt_pending_cmd *cmd;
6531 int err;
6532
6533 bt_dev_dbg(hdev, "sock %p", sk);
6534
6535 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6536 hdev->hci_ver < BLUETOOTH_VER_1_2)
6537 return mgmt_cmd_status(sk, hdev->id,
6538 MGMT_OP_SET_FAST_CONNECTABLE,
6539 MGMT_STATUS_NOT_SUPPORTED);
6540
6541 if (cp->val != 0x00 && cp->val != 0x01)
6542 return mgmt_cmd_status(sk, hdev->id,
6543 MGMT_OP_SET_FAST_CONNECTABLE,
6544 MGMT_STATUS_INVALID_PARAMS);
6545
6546 hci_dev_lock(hdev);
6547
6548 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6549 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6550 goto unlock;
6551 }
6552
6553 if (!hdev_is_powered(hdev)) {
6554 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6555 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6556 new_settings(hdev, sk);
6557 goto unlock;
6558 }
6559
6560 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6561 len);
6562 if (!cmd)
6563 err = -ENOMEM;
6564 else
6565 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6566 fast_connectable_complete);
6567
6568 if (err < 0) {
6569 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6570 MGMT_STATUS_FAILED);
6571
6572 if (cmd)
6573 mgmt_pending_free(cmd);
6574 }
6575
6576 unlock:
6577 hci_dev_unlock(hdev);
6578
6579 return err;
6580 }
6581
6582 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6583 {
6584 struct mgmt_pending_cmd *cmd = data;
6585
6586 bt_dev_dbg(hdev, "err %d", err);
6587
6588 if (err) {
6589 u8 mgmt_err = mgmt_status(err);
6590
6591 /* We need to restore the flag if related HCI commands
6592 * failed.
6593 */
6594 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6595
6596 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
6597 } else {
6598 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6599 new_settings(hdev, cmd->sk);
6600 }
6601
6602 mgmt_pending_free(cmd);
6603 }
6604
6605 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6606 {
6607 int status;
6608
6609 status = hci_write_fast_connectable_sync(hdev, false);
6610
6611 if (!status)
6612 status = hci_update_scan_sync(hdev);
6613
6614 /* Since only the advertising data flags will change, there
6615 * is no need to update the scan response data.
6616 */
6617 if (!status)
6618 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6619
6620 return status;
6621 }
6622
6623 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6624 {
6625 struct mgmt_mode *cp = data;
6626 struct mgmt_pending_cmd *cmd;
6627 int err;
6628
6629 bt_dev_dbg(hdev, "sock %p", sk);
6630
6631 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6632 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6633 MGMT_STATUS_NOT_SUPPORTED);
6634
6635 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6636 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6637 MGMT_STATUS_REJECTED);
6638
6639 if (cp->val != 0x00 && cp->val != 0x01)
6640 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6641 MGMT_STATUS_INVALID_PARAMS);
6642
6643 hci_dev_lock(hdev);
6644
6645 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6646 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6647 goto unlock;
6648 }
6649
6650 if (!hdev_is_powered(hdev)) {
6651 if (!cp->val) {
6652 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6653 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6654 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6655 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6656 }
6657
6658 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6659
6660 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6661 if (err < 0)
6662 goto unlock;
6663
6664 err = new_settings(hdev, sk);
6665 goto unlock;
6666 }
6667
6668 /* Reject disabling when powered on */
6669 if (!cp->val) {
6670 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6671 MGMT_STATUS_REJECTED);
6672 goto unlock;
6673 } else {
6674 /* When a dual-mode controller has been configured to operate
6675  * with LE only and a static address, switching
6676  * BR/EDR back on is not allowed.
6677  *
6678  * Dual-mode controllers shall operate with the public
6679  * address as their identity address for BR/EDR and LE. So
6680  * reject the attempt to create an invalid configuration.
6681  *
6682  * The same restriction applies when Secure Connections
6683  * has been enabled. For BR/EDR this is a controller feature
6684  * while for LE it is a host stack feature. This means that
6685  * switching BR/EDR back on when Secure Connections has been
6686  * enabled is not a supported transaction.
6687  */
6688 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6689 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6690 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6691 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6692 MGMT_STATUS_REJECTED);
6693 goto unlock;
6694 }
6695 }
6696
6697 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6698 if (!cmd)
6699 err = -ENOMEM;
6700 else
6701 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6702 set_bredr_complete);
6703
6704 if (err < 0) {
6705 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6706 MGMT_STATUS_FAILED);
6707 if (cmd)
6708 mgmt_pending_free(cmd);
6709
6710 goto unlock;
6711 }
6712
6713 /* We need to flip the bit already here so that
6714 * hci_req_update_adv_data generates the correct flags.
6715 */
6716 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6717
6718 unlock:
6719 hci_dev_unlock(hdev);
6720 return err;
6721 }
6722
6723 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6724 {
6725 struct mgmt_pending_cmd *cmd = data;
6726 struct mgmt_mode *cp;
6727
6728 bt_dev_dbg(hdev, "err %d", err);
6729
6730 if (err) {
6731 u8 mgmt_err = mgmt_status(err);
6732
6733 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
6734 goto done;
6735 }
6736
6737 cp = cmd->param;
6738
6739 switch (cp->val) {
6740 case 0x00:
6741 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6742 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6743 break;
6744 case 0x01:
6745 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6746 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6747 break;
6748 case 0x02:
6749 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6750 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6751 break;
6752 }
6753
6754 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6755 new_settings(hdev, cmd->sk);
6756
6757 done:
6758 mgmt_pending_free(cmd);
6759 }
6760
6761 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6762 {
6763 struct mgmt_pending_cmd *cmd = data;
6764 struct mgmt_mode *cp = cmd->param;
6765 u8 val = !!cp->val;
6766
6767 /* Force a write of val: make sure HCI_SC_ENABLED is set so that hci_write_sc_support_sync() does not skip the command. */
6768 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6769
6770 return hci_write_sc_support_sync(hdev, val);
6771 }
6772
6773 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6774 void *data, u16 len)
6775 {
6776 struct mgmt_mode *cp = data;
6777 struct mgmt_pending_cmd *cmd;
6778 u8 val;
6779 int err;
6780
6781 bt_dev_dbg(hdev, "sock %p", sk);
6782
6783 if (!lmp_sc_capable(hdev) &&
6784 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6785 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6786 MGMT_STATUS_NOT_SUPPORTED);
6787
6788 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6789 lmp_sc_capable(hdev) &&
6790 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6791 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6792 MGMT_STATUS_REJECTED);
6793
6794 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6795 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6796 MGMT_STATUS_INVALID_PARAMS);
6797
6798 hci_dev_lock(hdev);
6799
6800 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6801 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6802 bool changed;
6803
6804 if (cp->val) {
6805 changed = !hci_dev_test_and_set_flag(hdev,
6806 HCI_SC_ENABLED);
6807 if (cp->val == 0x02)
6808 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6809 else
6810 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6811 } else {
6812 changed = hci_dev_test_and_clear_flag(hdev,
6813 HCI_SC_ENABLED);
6814 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6815 }
6816
6817 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6818 if (err < 0)
6819 goto failed;
6820
6821 if (changed)
6822 err = new_settings(hdev, sk);
6823
6824 goto failed;
6825 }
6826
6827 val = !!cp->val;
6828
6829 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6830 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6831 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6832 goto failed;
6833 }
6834
6835 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6836 if (!cmd)
6837 err = -ENOMEM;
6838 else
6839 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6840 set_secure_conn_complete);
6841
6842 if (err < 0) {
6843 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6844 MGMT_STATUS_FAILED);
6845 if (cmd)
6846 mgmt_pending_free(cmd);
6847 }
6848
6849 failed:
6850 hci_dev_unlock(hdev);
6851 return err;
6852 }
6853
6854 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6855 void *data, u16 len)
6856 {
6857 struct mgmt_mode *cp = data;
6858 bool changed, use_changed;
6859 int err;
6860
6861 bt_dev_dbg(hdev, "sock %p", sk);
6862
6863 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6864 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6865 MGMT_STATUS_INVALID_PARAMS);
6866
6867 hci_dev_lock(hdev);
6868
6869 if (cp->val)
6870 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6871 else
6872 changed = hci_dev_test_and_clear_flag(hdev,
6873 HCI_KEEP_DEBUG_KEYS);
6874
6875 if (cp->val == 0x02)
6876 use_changed = !hci_dev_test_and_set_flag(hdev,
6877 HCI_USE_DEBUG_KEYS);
6878 else
6879 use_changed = hci_dev_test_and_clear_flag(hdev,
6880 HCI_USE_DEBUG_KEYS);
6881
6882 if (hdev_is_powered(hdev) && use_changed &&
6883 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6884 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6885 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6886 sizeof(mode), &mode);
6887 }
6888
6889 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6890 if (err < 0)
6891 goto unlock;
6892
6893 if (changed)
6894 err = new_settings(hdev, sk);
6895
6896 unlock:
6897 hci_dev_unlock(hdev);
6898 return err;
6899 }
6900
6901 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6902 u16 len)
6903 {
6904 struct mgmt_cp_set_privacy *cp = cp_data;
6905 bool changed;
6906 int err;
6907
6908 bt_dev_dbg(hdev, "sock %p", sk);
6909
6910 if (!lmp_le_capable(hdev))
6911 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6912 MGMT_STATUS_NOT_SUPPORTED);
6913
6914 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6915 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6916 MGMT_STATUS_INVALID_PARAMS);
6917
6918 if (hdev_is_powered(hdev))
6919 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6920 MGMT_STATUS_REJECTED);
6921
6922 hci_dev_lock(hdev);
6923
6924 /* If user space supports this command it is also expected to
6925 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6926 */
6927 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6928
6929 if (cp->privacy) {
6930 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6931 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
6932 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6933 hci_adv_instances_set_rpa_expired(hdev, true);
6934 if (cp->privacy == 0x02)
6935 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6936 else
6937 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6938 } else {
6939 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6940 memset(hdev->irk, 0, sizeof(hdev->irk));
6941 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6942 hci_adv_instances_set_rpa_expired(hdev, false);
6943 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6944 }
6945
6946 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
6947 if (err < 0)
6948 goto unlock;
6949
6950 if (changed)
6951 err = new_settings(hdev, sk);
6952
6953 unlock:
6954 hci_dev_unlock(hdev);
6955 return err;
6956 }
6957
6958 static bool irk_is_valid(struct mgmt_irk_info *irk)
6959 {
6960 switch (irk->addr.type) {
6961 case BDADDR_LE_PUBLIC:
6962 return true;
6963
6964 case BDADDR_LE_RANDOM:
6965 /* Two most significant bits shall be set */
6966 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6967 return false;
6968 return true;
6969 }
6970
6971 return false;
6972 }
6973
6974 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6975 u16 len)
6976 {
6977 struct mgmt_cp_load_irks *cp = cp_data;
6978 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
6979 sizeof(struct mgmt_irk_info));
6980 u16 irk_count, expected_len;
6981 int i, err;
6982
6983 bt_dev_dbg(hdev, "sock %p", sk);
6984
6985 if (!lmp_le_capable(hdev))
6986 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6987 MGMT_STATUS_NOT_SUPPORTED);
6988
6989 irk_count = __le16_to_cpu(cp->irk_count);
6990 if (irk_count > max_irk_count) {
6991 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
6992 irk_count);
6993 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6994 MGMT_STATUS_INVALID_PARAMS);
6995 }
6996
6997 expected_len = struct_size(cp, irks, irk_count);
6998 if (expected_len != len) {
6999 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7000 expected_len, len);
7001 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7002 MGMT_STATUS_INVALID_PARAMS);
7003 }
7004
7005 bt_dev_dbg(hdev, "irk_count %u", irk_count);
7006
7007 for (i = 0; i < irk_count; i++) {
7008 struct mgmt_irk_info *key = &cp->irks[i];
7009
7010 if (!irk_is_valid(key))
7011 return mgmt_cmd_status(sk, hdev->id,
7012 MGMT_OP_LOAD_IRKS,
7013 MGMT_STATUS_INVALID_PARAMS);
7014 }
7015
7016 hci_dev_lock(hdev);
7017
7018 hci_smp_irks_clear(hdev);
7019
7020 for (i = 0; i < irk_count; i++) {
7021 struct mgmt_irk_info *irk = &cp->irks[i];
7022
7023 if (hci_is_blocked_key(hdev,
7024 HCI_BLOCKED_KEY_TYPE_IRK,
7025 irk->val)) {
7026 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7027 &irk->addr.bdaddr);
7028 continue;
7029 }
7030
7031 hci_add_irk(hdev, &irk->addr.bdaddr,
7032 le_addr_type(irk->addr.type), irk->val,
7033 BDADDR_ANY);
7034 }
7035
7036 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7037
7038 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7039
7040 hci_dev_unlock(hdev);
7041
7042 return err;
7043 }
7044
7045 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7046 {
7047 if (key->initiator != 0x00 && key->initiator != 0x01)
7048 return false;
7049
7050 switch (key->addr.type) {
7051 case BDADDR_LE_PUBLIC:
7052 return true;
7053
7054 case BDADDR_LE_RANDOM:
7055 /* Two most significant bits shall be set */
7056 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7057 return false;
7058 return true;
7059 }
7060
7061 return false;
7062 }
7063
7064 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7065 void *cp_data, u16 len)
7066 {
7067 struct mgmt_cp_load_long_term_keys *cp = cp_data;
7068 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7069 sizeof(struct mgmt_ltk_info));
7070 u16 key_count, expected_len;
7071 int i, err;
7072
7073 bt_dev_dbg(hdev, "sock %p", sk);
7074
7075 if (!lmp_le_capable(hdev))
7076 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7077 MGMT_STATUS_NOT_SUPPORTED);
7078
7079 key_count = __le16_to_cpu(cp->key_count);
7080 if (key_count > max_key_count) {
7081 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7082 key_count);
7083 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7084 MGMT_STATUS_INVALID_PARAMS);
7085 }
7086
7087 expected_len = struct_size(cp, keys, key_count);
7088 if (expected_len != len) {
7089 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7090 expected_len, len);
7091 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7092 MGMT_STATUS_INVALID_PARAMS);
7093 }
7094
7095 bt_dev_dbg(hdev, "key_count %u", key_count);
7096
7097 hci_dev_lock(hdev);
7098
7099 hci_smp_ltks_clear(hdev);
7100
7101 for (i = 0; i < key_count; i++) {
7102 struct mgmt_ltk_info *key = &cp->keys[i];
7103 u8 type, authenticated;
7104
7105 if (hci_is_blocked_key(hdev,
7106 HCI_BLOCKED_KEY_TYPE_LTK,
7107 key->val)) {
7108 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7109 &key->addr.bdaddr);
7110 continue;
7111 }
7112
7113 if (!ltk_is_valid(key)) {
7114 bt_dev_warn(hdev, "Invalid LTK for %pMR",
7115 &key->addr.bdaddr);
7116 continue;
7117 }
7118
7119 switch (key->type) {
7120 case MGMT_LTK_UNAUTHENTICATED:
7121 authenticated = 0x00;
7122 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7123 break;
7124 case MGMT_LTK_AUTHENTICATED:
7125 authenticated = 0x01;
7126 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7127 break;
7128 case MGMT_LTK_P256_UNAUTH:
7129 authenticated = 0x00;
7130 type = SMP_LTK_P256;
7131 break;
7132 case MGMT_LTK_P256_AUTH:
7133 authenticated = 0x01;
7134 type = SMP_LTK_P256;
7135 break;
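/* P-256 debug keys are deliberately not stored: the fallthrough
 * lands in the default case, which skips the key.
 */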
7136 case MGMT_LTK_P256_DEBUG:
7137 authenticated = 0x00;
7138 type = SMP_LTK_P256_DEBUG;
7139 fallthrough;
7140 default:
7141 continue;
7142 }
7143
7144 hci_add_ltk(hdev, &key->addr.bdaddr,
7145 le_addr_type(key->addr.type), type, authenticated,
7146 key->val, key->enc_size, key->ediv, key->rand);
7147 }
7148
7149 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7150 NULL, 0);
7151
7152 hci_dev_unlock(hdev);
7153
7154 return err;
7155 }
7156
7157 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7158 {
7159 struct mgmt_pending_cmd *cmd = data;
7160 struct hci_conn *conn = cmd->user_data;
7161 struct mgmt_cp_get_conn_info *cp = cmd->param;
7162 struct mgmt_rp_get_conn_info rp;
7163 u8 status;
7164
7165 bt_dev_dbg(hdev, "err %d", err);
7166
7167 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7168
7169 status = mgmt_status(err);
7170 if (status == MGMT_STATUS_SUCCESS) {
7171 rp.rssi = conn->rssi;
7172 rp.tx_power = conn->tx_power;
7173 rp.max_tx_power = conn->max_tx_power;
7174 } else {
7175 rp.rssi = HCI_RSSI_INVALID;
7176 rp.tx_power = HCI_TX_POWER_INVALID;
7177 rp.max_tx_power = HCI_TX_POWER_INVALID;
7178 }
7179
7180 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_GET_CONN_INFO, status,
7181 &rp, sizeof(rp));
7182
7183 mgmt_pending_free(cmd);
7184 }
7185
7186 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7187 {
7188 struct mgmt_pending_cmd *cmd = data;
7189 struct mgmt_cp_get_conn_info *cp = cmd->param;
7190 struct hci_conn *conn;
7191 int err;
7192 __le16 handle;
7193
7194 /* Make sure we are still connected */
7195 if (cp->addr.type == BDADDR_BREDR)
7196 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7197 &cp->addr.bdaddr);
7198 else
7199 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7200
7201 if (!conn || conn->state != BT_CONNECTED)
7202 return MGMT_STATUS_NOT_CONNECTED;
7203
7204 cmd->user_data = conn;
7205 handle = cpu_to_le16(conn->handle);
7206
7207 /* Refresh RSSI each time */
7208 err = hci_read_rssi_sync(hdev, handle);
7209
7210 /* For LE links the TX power does not change, so there is no
7211  * need to query for it again once the value is known.
7212  */
7213 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7214 conn->tx_power == HCI_TX_POWER_INVALID))
7215 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7216
7217 /* Max TX power needs to be read only once per connection */
7218 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7219 err = hci_read_tx_power_sync(hdev, handle, 0x01);
7220
7221 return err;
7222 }
7223
7224 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7225 u16 len)
7226 {
7227 struct mgmt_cp_get_conn_info *cp = data;
7228 struct mgmt_rp_get_conn_info rp;
7229 struct hci_conn *conn;
7230 unsigned long conn_info_age;
7231 int err = 0;
7232
7233 bt_dev_dbg(hdev, "sock %p", sk);
7234
7235 memset(&rp, 0, sizeof(rp));
7236 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7237 rp.addr.type = cp->addr.type;
7238
7239 if (!bdaddr_type_is_valid(cp->addr.type))
7240 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7241 MGMT_STATUS_INVALID_PARAMS,
7242 &rp, sizeof(rp));
7243
7244 hci_dev_lock(hdev);
7245
7246 if (!hdev_is_powered(hdev)) {
7247 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7248 MGMT_STATUS_NOT_POWERED, &rp,
7249 sizeof(rp));
7250 goto unlock;
7251 }
7252
7253 if (cp->addr.type == BDADDR_BREDR)
7254 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7255 &cp->addr.bdaddr);
7256 else
7257 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7258
7259 if (!conn || conn->state != BT_CONNECTED) {
7260 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7261 MGMT_STATUS_NOT_CONNECTED, &rp,
7262 sizeof(rp));
7263 goto unlock;
7264 }
7265
7266 /* To keep the client from having to guess when to poll again, the
7267  * conn info age is a random value between the min/max set in hdev.
7268  */
7269 conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
7270 hdev->conn_info_max_age - 1);
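/* With the default limits (assumed here to be the usual 1000/3000 ms
 * conn_info_min_age/conn_info_max_age defaults) this refreshes the
 * cached values at most every one to three seconds.
 */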
7271
7272 /* Query controller to refresh cached values if they are too old or were
7273 * never read.
7274 */
7275 if (time_after(jiffies, conn->conn_info_timestamp +
7276 msecs_to_jiffies(conn_info_age)) ||
7277 !conn->conn_info_timestamp) {
7278 struct mgmt_pending_cmd *cmd;
7279
7280 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7281 len);
7282 if (!cmd) {
7283 err = -ENOMEM;
7284 } else {
7285 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7286 cmd, get_conn_info_complete);
7287 }
7288
7289 if (err < 0) {
7290 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7291 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7292
7293 if (cmd)
7294 mgmt_pending_free(cmd);
7295
7296 goto unlock;
7297 }
7298
7299 conn->conn_info_timestamp = jiffies;
7300 } else {
7301 /* Cache is valid, just reply with values cached in hci_conn */
7302 rp.rssi = conn->rssi;
7303 rp.tx_power = conn->tx_power;
7304 rp.max_tx_power = conn->max_tx_power;
7305
7306 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7307 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7308 }
7309
7310 unlock:
7311 hci_dev_unlock(hdev);
7312 return err;
7313 }
7314
7315 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7316 {
7317 struct mgmt_pending_cmd *cmd = data;
7318 struct mgmt_cp_get_clock_info *cp = cmd->param;
7319 struct mgmt_rp_get_clock_info rp;
7320 struct hci_conn *conn = cmd->user_data;
7321 u8 status = mgmt_status(err);
7322
7323 bt_dev_dbg(hdev, "err %d", err);
7324
7325 memset(&rp, 0, sizeof(rp));
7326 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7327 rp.addr.type = cp->addr.type;
7328
7329 if (err)
7330 goto complete;
7331
7332 rp.local_clock = cpu_to_le32(hdev->clock);
7333
7334 if (conn) {
7335 rp.piconet_clock = cpu_to_le32(conn->clock);
7336 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7337 }
7338
7339 complete:
7340 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status, &rp,
7341 sizeof(rp));
7342
7343 mgmt_pending_free(cmd);
7344 }
7345
7346 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7347 {
7348 struct mgmt_pending_cmd *cmd = data;
7349 struct mgmt_cp_get_clock_info *cp = cmd->param;
7350 struct hci_cp_read_clock hci_cp;
7351 struct hci_conn *conn;
7352
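/* The memset leaves hci_cp.which at 0x00, so this first call reads
 * the local clock; the piconet clock is read below once the
 * connection has been confirmed.
 */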
7353 memset(&hci_cp, 0, sizeof(hci_cp));
7354 hci_read_clock_sync(hdev, &hci_cp);
7355
7356 /* Make sure connection still exists */
7357 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7358 if (!conn || conn->state != BT_CONNECTED)
7359 return MGMT_STATUS_NOT_CONNECTED;
7360
7361 cmd->user_data = conn;
7362 hci_cp.handle = cpu_to_le16(conn->handle);
7363 hci_cp.which = 0x01; /* Piconet clock */
7364
7365 return hci_read_clock_sync(hdev, &hci_cp);
7366 }
7367
7368 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7369 u16 len)
7370 {
7371 struct mgmt_cp_get_clock_info *cp = data;
7372 struct mgmt_rp_get_clock_info rp;
7373 struct mgmt_pending_cmd *cmd;
7374 struct hci_conn *conn;
7375 int err;
7376
7377 bt_dev_dbg(hdev, "sock %p", sk);
7378
7379 memset(&rp, 0, sizeof(rp));
7380 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7381 rp.addr.type = cp->addr.type;
7382
7383 if (cp->addr.type != BDADDR_BREDR)
7384 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7385 MGMT_STATUS_INVALID_PARAMS,
7386 &rp, sizeof(rp));
7387
7388 hci_dev_lock(hdev);
7389
7390 if (!hdev_is_powered(hdev)) {
7391 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7392 MGMT_STATUS_NOT_POWERED, &rp,
7393 sizeof(rp));
7394 goto unlock;
7395 }
7396
7397 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7398 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7399 &cp->addr.bdaddr);
7400 if (!conn || conn->state != BT_CONNECTED) {
7401 err = mgmt_cmd_complete(sk, hdev->id,
7402 MGMT_OP_GET_CLOCK_INFO,
7403 MGMT_STATUS_NOT_CONNECTED,
7404 &rp, sizeof(rp));
7405 goto unlock;
7406 }
7407 } else {
7408 conn = NULL;
7409 }
7410
7411 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7412 if (!cmd)
7413 err = -ENOMEM;
7414 else
7415 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7416 get_clock_info_complete);
7417
7418 if (err < 0) {
7419 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7420 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7421
7422 if (cmd)
7423 mgmt_pending_free(cmd);
7424 }
7425
7427 unlock:
7428 hci_dev_unlock(hdev);
7429 return err;
7430 }
7431
7432 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7433 {
7434 struct hci_conn *conn;
7435
7436 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7437 if (!conn)
7438 return false;
7439
7440 if (conn->dst_type != type)
7441 return false;
7442
7443 if (conn->state != BT_CONNECTED)
7444 return false;
7445
7446 return true;
7447 }
7448
7449 /* This function requires the caller holds hdev->lock */
7450 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7451 u8 addr_type, u8 auto_connect)
7452 {
7453 struct hci_conn_params *params;
7454
7455 params = hci_conn_params_add(hdev, addr, addr_type);
7456 if (!params)
7457 return -EIO;
7458
7459 if (params->auto_connect == auto_connect)
7460 return 0;
7461
7462 hci_pend_le_list_del_init(params);
7463
7464 switch (auto_connect) {
7465 case HCI_AUTO_CONN_DISABLED:
7466 case HCI_AUTO_CONN_LINK_LOSS:
7467 /* If auto connect is being disabled while we're trying to
7468  * connect to the device, keep connecting.
7469  */
7470 if (params->explicit_connect)
7471 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7472 break;
7473 case HCI_AUTO_CONN_REPORT:
7474 if (params->explicit_connect)
7475 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7476 else
7477 hci_pend_le_list_add(params, &hdev->pend_le_reports);
7478 break;
7479 case HCI_AUTO_CONN_DIRECT:
7480 case HCI_AUTO_CONN_ALWAYS:
7481 if (!is_connected(hdev, addr, addr_type))
7482 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7483 break;
7484 }
7485
7486 params->auto_connect = auto_connect;
7487
7488 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7489 addr, addr_type, auto_connect);
7490
7491 return 0;
7492 }
7493
7494 static void device_added(struct sock *sk, struct hci_dev *hdev,
7495 bdaddr_t *bdaddr, u8 type, u8 action)
7496 {
7497 struct mgmt_ev_device_added ev;
7498
7499 bacpy(&ev.addr.bdaddr, bdaddr);
7500 ev.addr.type = type;
7501 ev.action = action;
7502
7503 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7504 }
7505
7506 static void add_device_complete(struct hci_dev *hdev, void *data, int err)
7507 {
7508 struct mgmt_pending_cmd *cmd = data;
7509 struct mgmt_cp_add_device *cp = cmd->param;
7510
7511 if (!err) {
7512 struct hci_conn_params *params;
7513
7514 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7515 le_addr_type(cp->addr.type));
7516
7517 device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
7518 cp->action);
7519 device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
7520 cp->addr.type, hdev->conn_flags,
7521 params ? params->flags : 0);
7522 }
7523
7524 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
7525 mgmt_status(err), &cp->addr, sizeof(cp->addr));
7526 mgmt_pending_free(cmd);
7527 }
7528
7529 static int add_device_sync(struct hci_dev *hdev, void *data)
7530 {
7531 return hci_update_passive_scan_sync(hdev);
7532 }
7533
7534 static int add_device(struct sock *sk, struct hci_dev *hdev,
7535 void *data, u16 len)
7536 {
7537 struct mgmt_pending_cmd *cmd;
7538 struct mgmt_cp_add_device *cp = data;
7539 u8 auto_conn, addr_type;
7540 struct hci_conn_params *params;
7541 int err;
7542 u32 current_flags = 0;
7543 u32 supported_flags;
7544
7545 bt_dev_dbg(hdev, "sock %p", sk);
7546
7547 if (!bdaddr_type_is_valid(cp->addr.type) ||
7548 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7549 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7550 MGMT_STATUS_INVALID_PARAMS,
7551 &cp->addr, sizeof(cp->addr));
7552
7553 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7554 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7555 MGMT_STATUS_INVALID_PARAMS,
7556 &cp->addr, sizeof(cp->addr));
7557
7558 hci_dev_lock(hdev);
7559
7560 if (cp->addr.type == BDADDR_BREDR) {
7561 /* Only the incoming-connections action is supported for now */
7562 if (cp->action != 0x01) {
7563 err = mgmt_cmd_complete(sk, hdev->id,
7564 MGMT_OP_ADD_DEVICE,
7565 MGMT_STATUS_INVALID_PARAMS,
7566 &cp->addr, sizeof(cp->addr));
7567 goto unlock;
7568 }
7569
7570 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7571 &cp->addr.bdaddr,
7572 cp->addr.type, 0);
7573 if (err)
7574 goto unlock;
7575
7576 hci_update_scan(hdev);
7577
7578 goto added;
7579 }
7580
7581 addr_type = le_addr_type(cp->addr.type);
7582
7583 if (cp->action == 0x02)
7584 auto_conn = HCI_AUTO_CONN_ALWAYS;
7585 else if (cp->action == 0x01)
7586 auto_conn = HCI_AUTO_CONN_DIRECT;
7587 else
7588 auto_conn = HCI_AUTO_CONN_REPORT;
7589
7590 /* Kernel internally uses conn_params with resolvable private
7591 * address, but Add Device allows only identity addresses.
7592 * Make sure it is enforced before calling
7593 * hci_conn_params_lookup.
7594 */
7595 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7596 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7597 MGMT_STATUS_INVALID_PARAMS,
7598 &cp->addr, sizeof(cp->addr));
7599 goto unlock;
7600 }
7601
7602 /* If the connection parameters don't exist for this device,
7603 * they will be created and configured with defaults.
7604 */
7605 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7606 auto_conn) < 0) {
7607 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7608 MGMT_STATUS_FAILED, &cp->addr,
7609 sizeof(cp->addr));
7610 goto unlock;
7611 } else {
7612 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7613 addr_type);
7614 if (params)
7615 current_flags = params->flags;
7616 }
7617
7618 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
7619 if (!cmd) {
7620 err = -ENOMEM;
7621 goto unlock;
7622 }
7623
7624 err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
7625 add_device_complete);
7626 if (err < 0) {
7627 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7628 MGMT_STATUS_FAILED, &cp->addr,
7629 sizeof(cp->addr));
7630 mgmt_pending_free(cmd);
7631 }
7632
7633 goto unlock;
7634
7635 added:
7636 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7637 supported_flags = hdev->conn_flags;
7638 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7639 supported_flags, current_flags);
7640
7641 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7642 MGMT_STATUS_SUCCESS, &cp->addr,
7643 sizeof(cp->addr));
7644
7645 unlock:
7646 hci_dev_unlock(hdev);
7647 return err;
7648 }
7649
7650 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7651 bdaddr_t *bdaddr, u8 type)
7652 {
7653 struct mgmt_ev_device_removed ev;
7654
7655 bacpy(&ev.addr.bdaddr, bdaddr);
7656 ev.addr.type = type;
7657
7658 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7659 }
7660
7661 static int remove_device_sync(struct hci_dev *hdev, void *data)
7662 {
7663 return hci_update_passive_scan_sync(hdev);
7664 }
7665
7666 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7667 void *data, u16 len)
7668 {
7669 struct mgmt_cp_remove_device *cp = data;
7670 int err;
7671
7672 bt_dev_dbg(hdev, "sock %p", sk);
7673
7674 hci_dev_lock(hdev);
7675
7676 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7677 struct hci_conn_params *params;
7678 u8 addr_type;
7679
7680 if (!bdaddr_type_is_valid(cp->addr.type)) {
7681 err = mgmt_cmd_complete(sk, hdev->id,
7682 MGMT_OP_REMOVE_DEVICE,
7683 MGMT_STATUS_INVALID_PARAMS,
7684 &cp->addr, sizeof(cp->addr));
7685 goto unlock;
7686 }
7687
7688 if (cp->addr.type == BDADDR_BREDR) {
7689 err = hci_bdaddr_list_del(&hdev->accept_list,
7690 &cp->addr.bdaddr,
7691 cp->addr.type);
7692 if (err) {
7693 err = mgmt_cmd_complete(sk, hdev->id,
7694 MGMT_OP_REMOVE_DEVICE,
7695 MGMT_STATUS_INVALID_PARAMS,
7696 &cp->addr,
7697 sizeof(cp->addr));
7698 goto unlock;
7699 }
7700
7701 hci_update_scan(hdev);
7702
7703 device_removed(sk, hdev, &cp->addr.bdaddr,
7704 cp->addr.type);
7705 goto complete;
7706 }
7707
7708 addr_type = le_addr_type(cp->addr.type);
7709
7710 /* Kernel internally uses conn_params with resolvable private
7711 * address, but Remove Device allows only identity addresses.
7712 * Make sure it is enforced before calling
7713 * hci_conn_params_lookup.
7714 */
7715 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7716 err = mgmt_cmd_complete(sk, hdev->id,
7717 MGMT_OP_REMOVE_DEVICE,
7718 MGMT_STATUS_INVALID_PARAMS,
7719 &cp->addr, sizeof(cp->addr));
7720 goto unlock;
7721 }
7722
7723 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7724 addr_type);
7725 if (!params) {
7726 err = mgmt_cmd_complete(sk, hdev->id,
7727 MGMT_OP_REMOVE_DEVICE,
7728 MGMT_STATUS_INVALID_PARAMS,
7729 &cp->addr, sizeof(cp->addr));
7730 goto unlock;
7731 }
7732
7733 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7734 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7735 err = mgmt_cmd_complete(sk, hdev->id,
7736 MGMT_OP_REMOVE_DEVICE,
7737 MGMT_STATUS_INVALID_PARAMS,
7738 &cp->addr, sizeof(cp->addr));
7739 goto unlock;
7740 }
7741
7742 hci_conn_params_free(params);
7743
7744 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7745 } else {
7746 struct hci_conn_params *p, *tmp;
7747 struct bdaddr_list *b, *btmp;
7748
7749 if (cp->addr.type) {
7750 err = mgmt_cmd_complete(sk, hdev->id,
7751 MGMT_OP_REMOVE_DEVICE,
7752 MGMT_STATUS_INVALID_PARAMS,
7753 &cp->addr, sizeof(cp->addr));
7754 goto unlock;
7755 }
7756
7757 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7758 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7759 list_del(&b->list);
7760 kfree(b);
7761 }
7762
7763 hci_update_scan(hdev);
7764
7765 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7766 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7767 continue;
7768 device_removed(sk, hdev, &p->addr, p->addr_type);
7769 if (p->explicit_connect) {
7770 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7771 continue;
7772 }
7773 hci_conn_params_free(p);
7774 }
7775
7776 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7777 }
7778
7779 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7780
7781 complete:
7782 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7783 MGMT_STATUS_SUCCESS, &cp->addr,
7784 sizeof(cp->addr));
7785 unlock:
7786 hci_dev_unlock(hdev);
7787 return err;
7788 }
7789
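/* Runs on the cmd_sync queue: if the connection whose parameters were just
 * loaded is still up, push the new values with an LE Connection Update;
 * if it has gone away in the meantime, give up with -ECANCELED.
 */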
7790 static int conn_update_sync(struct hci_dev *hdev, void *data)
7791 {
7792 struct hci_conn_params *params = data;
7793 struct hci_conn *conn;
7794
7795 conn = hci_conn_hash_lookup_le(hdev, &params->addr, params->addr_type);
7796 if (!conn)
7797 return -ECANCELED;
7798
7799 return hci_le_conn_update_sync(hdev, conn, params);
7800 }
7801
7802 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7803 u16 len)
7804 {
7805 struct mgmt_cp_load_conn_param *cp = data;
7806 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7807 sizeof(struct mgmt_conn_param));
7808 u16 param_count, expected_len;
7809 int i;
7810
7811 if (!lmp_le_capable(hdev))
7812 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7813 MGMT_STATUS_NOT_SUPPORTED);
7814
7815 param_count = __le16_to_cpu(cp->param_count);
7816 if (param_count > max_param_count) {
7817 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7818 param_count);
7819 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7820 MGMT_STATUS_INVALID_PARAMS);
7821 }
7822
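/* struct_size() expands to sizeof(*cp) + param_count * sizeof(cp->params[0]),
 * so any request whose length is not an exact fit is rejected.
 */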
7823 expected_len = struct_size(cp, params, param_count);
7824 if (expected_len != len) {
7825 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7826 expected_len, len);
7827 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7828 MGMT_STATUS_INVALID_PARAMS);
7829 }
7830
7831 bt_dev_dbg(hdev, "param_count %u", param_count);
7832
7833 hci_dev_lock(hdev);
7834
7835 if (param_count > 1)
7836 hci_conn_params_clear_disabled(hdev);
7837
7838 for (i = 0; i < param_count; i++) {
7839 struct mgmt_conn_param *param = &cp->params[i];
7840 struct hci_conn_params *hci_param;
7841 u16 min, max, latency, timeout;
7842 bool update = false;
7843 u8 addr_type;
7844
7845 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
7846 param->addr.type);
7847
7848 if (param->addr.type == BDADDR_LE_PUBLIC) {
7849 addr_type = ADDR_LE_DEV_PUBLIC;
7850 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7851 addr_type = ADDR_LE_DEV_RANDOM;
7852 } else {
7853 bt_dev_err(hdev, "ignoring invalid connection parameters");
7854 continue;
7855 }
7856
7857 min = le16_to_cpu(param->min_interval);
7858 max = le16_to_cpu(param->max_interval);
7859 latency = le16_to_cpu(param->latency);
7860 timeout = le16_to_cpu(param->timeout);
7861
7862 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7863 min, max, latency, timeout);
7864
7865 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7866 bt_dev_err(hdev, "ignoring invalid connection parameters");
7867 continue;
7868 }
7869
7870 /* Detect when the loading is for an existing parameter, then
7871 * attempt to trigger the connection update procedure.
7872 */
7873 if (!i && param_count == 1) {
7874 hci_param = hci_conn_params_lookup(hdev,
7875 &param->addr.bdaddr,
7876 addr_type);
7877 if (hci_param)
7878 update = true;
7879 else
7880 hci_conn_params_clear_disabled(hdev);
7881 }
7882
7883 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7884 addr_type);
7885 if (!hci_param) {
7886 bt_dev_err(hdev, "failed to add connection parameters");
7887 continue;
7888 }
7889
7890 hci_param->conn_min_interval = min;
7891 hci_param->conn_max_interval = max;
7892 hci_param->conn_latency = latency;
7893 hci_param->supervision_timeout = timeout;
7894
7895 /* Check if we need to trigger a connection update */
7896 if (update) {
7897 struct hci_conn *conn;
7898
7899 /* Lookup for existing connection as central and check
7900 * if parameters match and if they don't then trigger
7901 * a connection update.
7902 */
7903 conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
7904 addr_type);
7905 if (conn && conn->role == HCI_ROLE_MASTER &&
7906 (conn->le_conn_min_interval != min ||
7907 conn->le_conn_max_interval != max ||
7908 conn->le_conn_latency != latency ||
7909 conn->le_supv_timeout != timeout))
7910 hci_cmd_sync_queue(hdev, conn_update_sync,
7911 hci_param, NULL);
7912 }
7913 }
7914
7915 hci_dev_unlock(hdev);
7916
7917 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
7918 NULL, 0);
7919 }
7920
7921 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7922 void *data, u16 len)
7923 {
7924 struct mgmt_cp_set_external_config *cp = data;
7925 bool changed;
7926 int err;
7927
7928 bt_dev_dbg(hdev, "sock %p", sk);
7929
7930 if (hdev_is_powered(hdev))
7931 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7932 MGMT_STATUS_REJECTED);
7933
7934 if (cp->config != 0x00 && cp->config != 0x01)
7935 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7936 MGMT_STATUS_INVALID_PARAMS);
7937
7938 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7939 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7940 MGMT_STATUS_NOT_SUPPORTED);
7941
7942 hci_dev_lock(hdev);
7943
7944 if (cp->config)
7945 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7946 else
7947 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7948
7949 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7950 if (err < 0)
7951 goto unlock;
7952
7953 if (!changed)
7954 goto unlock;
7955
7956 err = new_options(hdev, sk);
7957
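/* At this point the HCI_UNCONFIGURED flag no longer matches the actual
 * configuration state (the comparison holds when both are true or both
 * are false), so re-announce the index under its new type.
 */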
7958 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7959 mgmt_index_removed(hdev);
7960
7961 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7962 hci_dev_set_flag(hdev, HCI_CONFIG);
7963 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7964
7965 queue_work(hdev->req_workqueue, &hdev->power_on);
7966 } else {
7967 set_bit(HCI_RAW, &hdev->flags);
7968 mgmt_index_added(hdev);
7969 }
7970 }
7971
7972 unlock:
7973 hci_dev_unlock(hdev);
7974 return err;
7975 }
7976
7977 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
7978 void *data, u16 len)
7979 {
7980 struct mgmt_cp_set_public_address *cp = data;
7981 bool changed;
7982 int err;
7983
7984 bt_dev_dbg(hdev, "sock %p", sk);
7985
7986 if (hdev_is_powered(hdev))
7987 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7988 MGMT_STATUS_REJECTED);
7989
7990 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
7991 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7992 MGMT_STATUS_INVALID_PARAMS);
7993
7994 if (!hdev->set_bdaddr)
7995 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7996 MGMT_STATUS_NOT_SUPPORTED);
7997
7998 hci_dev_lock(hdev);
7999
8000 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
8001 bacpy(&hdev->public_addr, &cp->bdaddr);
8002
8003 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8004 if (err < 0)
8005 goto unlock;
8006
8007 if (!changed)
8008 goto unlock;
8009
8010 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8011 err = new_options(hdev, sk);
8012
8013 if (is_configured(hdev)) {
8014 mgmt_index_removed(hdev);
8015
8016 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8017
8018 hci_dev_set_flag(hdev, HCI_CONFIG);
8019 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8020
8021 queue_work(hdev->req_workqueue, &hdev->power_on);
8022 }
8023
8024 unlock:
8025 hci_dev_unlock(hdev);
8026 return err;
8027 }
8028
8029 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8030 int err)
8031 {
8032 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8033 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8034 u8 *h192, *r192, *h256, *r256;
8035 struct mgmt_pending_cmd *cmd = data;
8036 struct sk_buff *skb = cmd->skb;
8037 u8 status = mgmt_status(err);
8038 u16 eir_len;
8039
8040 if (err == -ECANCELED ||
8041 cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
8042 return;
8043
8044 if (!status) {
8045 if (!skb)
8046 status = MGMT_STATUS_FAILED;
8047 else if (IS_ERR(skb))
8048 status = mgmt_status(PTR_ERR(skb));
8049 else
8050 status = mgmt_status(skb->data[0]);
8051 }
8052
8053 bt_dev_dbg(hdev, "status %u", status);
8054
8055 mgmt_cp = cmd->param;
8056
8057 if (status) {
8058 status = mgmt_status(status);
8059 eir_len = 0;
8060
8061 h192 = NULL;
8062 r192 = NULL;
8063 h256 = NULL;
8064 r256 = NULL;
8065 } else if (!bredr_sc_enabled(hdev)) {
8066 struct hci_rp_read_local_oob_data *rp;
8067
8068 if (skb->len != sizeof(*rp)) {
8069 status = MGMT_STATUS_FAILED;
8070 eir_len = 0;
8071 } else {
8072 status = MGMT_STATUS_SUCCESS;
8073 rp = (void *)skb->data;
8074
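/* Class of Device field (3 bytes + 2-byte EIR header) plus the
 * 16-byte hash and randomizer (16 + 2 bytes each).
 */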
8075 eir_len = 5 + 18 + 18;
8076 h192 = rp->hash;
8077 r192 = rp->rand;
8078 h256 = NULL;
8079 r256 = NULL;
8080 }
8081 } else {
8082 struct hci_rp_read_local_oob_ext_data *rp;
8083
8084 if (skb->len != sizeof(*rp)) {
8085 status = MGMT_STATUS_FAILED;
8086 eir_len = 0;
8087 } else {
8088 status = MGMT_STATUS_SUCCESS;
8089 rp = (void *)skb->data;
8090
8091 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8092 eir_len = 5 + 18 + 18;
8093 h192 = NULL;
8094 r192 = NULL;
8095 } else {
8096 eir_len = 5 + 18 + 18 + 18 + 18;
8097 h192 = rp->hash192;
8098 r192 = rp->rand192;
8099 }
8100
8101 h256 = rp->hash256;
8102 r256 = rp->rand256;
8103 }
8104 }
8105
8106 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8107 if (!mgmt_rp)
8108 goto done;
8109
8110 if (eir_len == 0)
8111 goto send_rsp;
8112
8113 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8114 hdev->dev_class, 3);
8115
8116 if (h192 && r192) {
8117 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8118 EIR_SSP_HASH_C192, h192, 16);
8119 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8120 EIR_SSP_RAND_R192, r192, 16);
8121 }
8122
8123 if (h256 && r256) {
8124 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8125 EIR_SSP_HASH_C256, h256, 16);
8126 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8127 EIR_SSP_RAND_R256, r256, 16);
8128 }
8129
8130 send_rsp:
8131 mgmt_rp->type = mgmt_cp->type;
8132 mgmt_rp->eir_len = cpu_to_le16(eir_len);
8133
8134 err = mgmt_cmd_complete(cmd->sk, hdev->id,
8135 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8136 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8137 if (err < 0 || status)
8138 goto done;
8139
8140 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8141
8142 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8143 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8144 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8145 done:
8146 if (skb && !IS_ERR(skb))
8147 kfree_skb(skb);
8148
8149 kfree(mgmt_rp);
8150 mgmt_pending_remove(cmd);
8151 }
8152
8153 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8154 struct mgmt_cp_read_local_oob_ext_data *cp)
8155 {
8156 struct mgmt_pending_cmd *cmd;
8157 int err;
8158
8159 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8160 cp, sizeof(*cp));
8161 if (!cmd)
8162 return -ENOMEM;
8163
8164 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8165 read_local_oob_ext_data_complete);
8166
8167 if (err < 0) {
8168 mgmt_pending_remove(cmd);
8169 return err;
8170 }
8171
8172 return 0;
8173 }
8174
8175 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8176 void *data, u16 data_len)
8177 {
8178 struct mgmt_cp_read_local_oob_ext_data *cp = data;
8179 struct mgmt_rp_read_local_oob_ext_data *rp;
8180 size_t rp_len;
8181 u16 eir_len;
8182 u8 status, flags, role, addr[7], hash[16], rand[16];
8183 int err;
8184
8185 bt_dev_dbg(hdev, "sock %p", sk);
8186
8187 if (hdev_is_powered(hdev)) {
8188 switch (cp->type) {
8189 case BIT(BDADDR_BREDR):
8190 status = mgmt_bredr_support(hdev);
8191 if (status)
8192 eir_len = 0;
8193 else
8194 eir_len = 5;
8195 break;
8196 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8197 status = mgmt_le_support(hdev);
8198 if (status)
8199 eir_len = 0;
8200 else
8201 eir_len = 9 + 3 + 18 + 18 + 3;
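/* i.e. LE address (7 + 2) + role (1 + 2) + SC confirmation (16 + 2) +
 * SC random (16 + 2) + flags (1 + 2) bytes.
 */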
8202 break;
8203 default:
8204 status = MGMT_STATUS_INVALID_PARAMS;
8205 eir_len = 0;
8206 break;
8207 }
8208 } else {
8209 status = MGMT_STATUS_NOT_POWERED;
8210 eir_len = 0;
8211 }
8212
8213 rp_len = sizeof(*rp) + eir_len;
8214 rp = kmalloc(rp_len, GFP_ATOMIC);
8215 if (!rp)
8216 return -ENOMEM;
8217
8218 if (!status && !lmp_ssp_capable(hdev)) {
8219 status = MGMT_STATUS_NOT_SUPPORTED;
8220 eir_len = 0;
8221 }
8222
8223 if (status)
8224 goto complete;
8225
8226 hci_dev_lock(hdev);
8227
8228 eir_len = 0;
8229 switch (cp->type) {
8230 case BIT(BDADDR_BREDR):
8231 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8232 err = read_local_ssp_oob_req(hdev, sk, cp);
8233 hci_dev_unlock(hdev);
8234 if (!err)
8235 goto done;
8236
8237 status = MGMT_STATUS_FAILED;
8238 goto complete;
8239 } else {
8240 eir_len = eir_append_data(rp->eir, eir_len,
8241 EIR_CLASS_OF_DEV,
8242 hdev->dev_class, 3);
8243 }
8244 break;
8245 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8246 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8247 smp_generate_oob(hdev, hash, rand) < 0) {
8248 hci_dev_unlock(hdev);
8249 status = MGMT_STATUS_FAILED;
8250 goto complete;
8251 }
8252
8253 /* This should return the active RPA, but since the RPA
8254 * is only programmed on demand, it is really hard to fill
8255 * this in at the moment. For now disallow retrieving
8256 * local out-of-band data when privacy is in use.
8257 *
8258 * Returning the identity address will not help here since
8259 * pairing happens before the identity resolving key is
8260 * known and thus the connection establishment happens
8261 * based on the RPA and not the identity address.
8262 */
8263 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8264 hci_dev_unlock(hdev);
8265 status = MGMT_STATUS_REJECTED;
8266 goto complete;
8267 }
8268
8269 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8270 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8271 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8272 bacmp(&hdev->static_addr, BDADDR_ANY))) {
8273 memcpy(addr, &hdev->static_addr, 6);
8274 addr[6] = 0x01;
8275 } else {
8276 memcpy(addr, &hdev->bdaddr, 6);
8277 addr[6] = 0x00;
8278 }
8279
8280 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8281 addr, sizeof(addr));
8282
8283 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8284 role = 0x02;
8285 else
8286 role = 0x01;
8287
8288 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8289 &role, sizeof(role));
8290
8291 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8292 eir_len = eir_append_data(rp->eir, eir_len,
8293 EIR_LE_SC_CONFIRM,
8294 hash, sizeof(hash));
8295
8296 eir_len = eir_append_data(rp->eir, eir_len,
8297 EIR_LE_SC_RANDOM,
8298 rand, sizeof(rand));
8299 }
8300
8301 flags = mgmt_get_adv_discov_flags(hdev);
8302
8303 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8304 flags |= LE_AD_NO_BREDR;
8305
8306 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8307 &flags, sizeof(flags));
8308 break;
8309 }
8310
8311 hci_dev_unlock(hdev);
8312
8313 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8314
8315 status = MGMT_STATUS_SUCCESS;
8316
8317 complete:
8318 rp->type = cp->type;
8319 rp->eir_len = cpu_to_le16(eir_len);
8320
8321 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8322 status, rp, sizeof(*rp) + eir_len);
8323 if (err < 0 || status)
8324 goto done;
8325
8326 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8327 rp, sizeof(*rp) + eir_len,
8328 HCI_MGMT_OOB_DATA_EVENTS, sk);
8329
8330 done:
8331 kfree(rp);
8332
8333 return err;
8334 }
8335
8336 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8337 {
8338 u32 flags = 0;
8339
8340 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8341 flags |= MGMT_ADV_FLAG_DISCOV;
8342 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8343 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8344 flags |= MGMT_ADV_FLAG_APPEARANCE;
8345 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8346 flags |= MGMT_ADV_PARAM_DURATION;
8347 flags |= MGMT_ADV_PARAM_TIMEOUT;
8348 flags |= MGMT_ADV_PARAM_INTERVALS;
8349 flags |= MGMT_ADV_PARAM_TX_POWER;
8350 flags |= MGMT_ADV_PARAM_SCAN_RSP;
8351
8352 /* With extended advertising, the TX_POWER returned from Set Adv
8353 * Param is always valid.
8354 */
8355 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8356 flags |= MGMT_ADV_FLAG_TX_POWER;
8357
8358 if (ext_adv_capable(hdev)) {
8359 flags |= MGMT_ADV_FLAG_SEC_1M;
8360 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8361 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8362
8363 if (le_2m_capable(hdev))
8364 flags |= MGMT_ADV_FLAG_SEC_2M;
8365
8366 if (le_coded_capable(hdev))
8367 flags |= MGMT_ADV_FLAG_SEC_CODED;
8368 }
8369
8370 return flags;
8371 }
8372
8373 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8374 void *data, u16 data_len)
8375 {
8376 struct mgmt_rp_read_adv_features *rp;
8377 size_t rp_len;
8378 int err;
8379 struct adv_info *adv_instance;
8380 u32 supported_flags;
8381 u8 *instance;
8382
8383 bt_dev_dbg(hdev, "sock %p", sk);
8384
8385 if (!lmp_le_capable(hdev))
8386 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8387 MGMT_STATUS_REJECTED);
8388
8389 hci_dev_lock(hdev);
8390
8391 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8392 rp = kmalloc(rp_len, GFP_ATOMIC);
8393 if (!rp) {
8394 hci_dev_unlock(hdev);
8395 return -ENOMEM;
8396 }
8397
8398 supported_flags = get_supported_adv_flags(hdev);
8399
8400 rp->supported_flags = cpu_to_le32(supported_flags);
8401 rp->max_adv_data_len = max_adv_len(hdev);
8402 rp->max_scan_rsp_len = max_adv_len(hdev);
8403 rp->max_instances = hdev->le_num_of_adv_sets;
8404 rp->num_instances = hdev->adv_instance_cnt;
8405
8406 instance = rp->instance;
8407 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8408 /* Only instances 1-le_num_of_adv_sets are externally visible */
8409 if (adv_instance->instance <= hdev->adv_instance_cnt) {
8410 *instance = adv_instance->instance;
8411 instance++;
8412 } else {
8413 rp->num_instances--;
8414 rp_len--;
8415 }
8416 }
8417
8418 hci_dev_unlock(hdev);
8419
8420 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8421 MGMT_STATUS_SUCCESS, rp, rp_len);
8422
8423 kfree(rp);
8424
8425 return err;
8426 }
8427
8428 static u8 calculate_name_len(struct hci_dev *hdev)
8429 {
8430 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8431
8432 return eir_append_local_name(hdev, buf, 0);
8433 }
8434
8435 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8436 bool is_adv_data)
8437 {
8438 u8 max_len = max_adv_len(hdev);
8439
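/* Every managed field costs its payload plus the 2-byte AD header:
 * flags and TX power are 1 + 2 bytes each, appearance is 2 + 2, and
 * the local name costs whatever eir_append_local_name() would emit.
 */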
8440 if (is_adv_data) {
8441 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8442 MGMT_ADV_FLAG_LIMITED_DISCOV |
8443 MGMT_ADV_FLAG_MANAGED_FLAGS))
8444 max_len -= 3;
8445
8446 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8447 max_len -= 3;
8448 } else {
8449 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8450 max_len -= calculate_name_len(hdev);
8451
8452 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8453 max_len -= 4;
8454 }
8455
8456 return max_len;
8457 }
8458
8459 static bool flags_managed(u32 adv_flags)
8460 {
8461 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8462 MGMT_ADV_FLAG_LIMITED_DISCOV |
8463 MGMT_ADV_FLAG_MANAGED_FLAGS);
8464 }
8465
8466 static bool tx_power_managed(u32 adv_flags)
8467 {
8468 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8469 }
8470
8471 static bool name_managed(u32 adv_flags)
8472 {
8473 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8474 }
8475
8476 static bool appearance_managed(u32 adv_flags)
8477 {
8478 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8479 }
8480
8481 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8482 u8 len, bool is_adv_data)
8483 {
8484 int i, cur_len;
8485 u8 max_len;
8486
8487 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8488
8489 if (len > max_len)
8490 return false;
8491
8492 /* Make sure that the data is correctly formatted. */
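/* Each AD structure starts with a length octet covering the cur_len
 * octets that follow (type + payload), hence the i += cur_len + 1 step.
 */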
8493 for (i = 0; i < len; i += (cur_len + 1)) {
8494 cur_len = data[i];
8495
8496 if (!cur_len)
8497 continue;
8498
8499 if (data[i + 1] == EIR_FLAGS &&
8500 (!is_adv_data || flags_managed(adv_flags)))
8501 return false;
8502
8503 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8504 return false;
8505
8506 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8507 return false;
8508
8509 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8510 return false;
8511
8512 if (data[i + 1] == EIR_APPEARANCE &&
8513 appearance_managed(adv_flags))
8514 return false;
8515
8516 /* If the current field length would exceed the total data
8517 * length, then it's invalid.
8518 */
8519 if (i + cur_len >= len)
8520 return false;
8521 }
8522
8523 return true;
8524 }
8525
8526 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8527 {
8528 u32 supported_flags, phy_flags;
8529
8530 /* The current implementation only supports a subset of the specified
8531 * flags. Also need to check mutual exclusiveness of sec flags.
8532 */
8533 supported_flags = get_supported_adv_flags(hdev);
8534 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
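/* phy_flags & -phy_flags isolates the lowest set bit, so the XOR below
 * is non-zero exactly when more than one MGMT_ADV_FLAG_SEC_* bit is
 * set; at most one of the SEC_* flags may be requested at a time.
 */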
8535 if (adv_flags & ~supported_flags ||
8536 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8537 return false;
8538
8539 return true;
8540 }
8541
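/* Instances cannot safely be added or changed while a Set LE command is
 * pending, since toggling LE also tears down or re-enables advertising.
 */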
8542 static bool adv_busy(struct hci_dev *hdev)
8543 {
8544 return pending_find(MGMT_OP_SET_LE, hdev);
8545 }
8546
8547 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8548 int err)
8549 {
8550 struct adv_info *adv, *n;
8551
8552 bt_dev_dbg(hdev, "err %d", err);
8553
8554 hci_dev_lock(hdev);
8555
8556 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8557 u8 instance;
8558
8559 if (!adv->pending)
8560 continue;
8561
8562 if (!err) {
8563 adv->pending = false;
8564 continue;
8565 }
8566
8567 instance = adv->instance;
8568
8569 if (hdev->cur_adv_instance == instance)
8570 cancel_adv_timeout(hdev);
8571
8572 hci_remove_adv_instance(hdev, instance);
8573 mgmt_advertising_removed(sk, hdev, instance);
8574 }
8575
8576 hci_dev_unlock(hdev);
8577 }
8578
8579 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8580 {
8581 struct mgmt_pending_cmd *cmd = data;
8582 struct mgmt_cp_add_advertising *cp = cmd->param;
8583 struct mgmt_rp_add_advertising rp;
8584
8585 memset(&rp, 0, sizeof(rp));
8586
8587 rp.instance = cp->instance;
8588
8589 if (err)
8590 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
8591 mgmt_status(err));
8592 else
8593 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
8594 mgmt_status(err), &rp, sizeof(rp));
8595
8596 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8597
8598 mgmt_pending_free(cmd);
8599 }
8600
8601 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8602 {
8603 struct mgmt_pending_cmd *cmd = data;
8604 struct mgmt_cp_add_advertising *cp = cmd->param;
8605
8606 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8607 }
8608
8609 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8610 void *data, u16 data_len)
8611 {
8612 struct mgmt_cp_add_advertising *cp = data;
8613 struct mgmt_rp_add_advertising rp;
8614 u32 flags;
8615 u8 status;
8616 u16 timeout, duration;
8617 unsigned int prev_instance_cnt;
8618 u8 schedule_instance = 0;
8619 struct adv_info *adv, *next_instance;
8620 int err;
8621 struct mgmt_pending_cmd *cmd;
8622
8623 bt_dev_dbg(hdev, "sock %p", sk);
8624
8625 status = mgmt_le_support(hdev);
8626 if (status)
8627 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8628 status);
8629
8630 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8631 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8632 MGMT_STATUS_INVALID_PARAMS);
8633
8634 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8635 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8636 MGMT_STATUS_INVALID_PARAMS);
8637
8638 flags = __le32_to_cpu(cp->flags);
8639 timeout = __le16_to_cpu(cp->timeout);
8640 duration = __le16_to_cpu(cp->duration);
8641
8642 if (!requested_adv_flags_are_valid(hdev, flags))
8643 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8644 MGMT_STATUS_INVALID_PARAMS);
8645
8646 hci_dev_lock(hdev);
8647
8648 if (timeout && !hdev_is_powered(hdev)) {
8649 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8650 MGMT_STATUS_REJECTED);
8651 goto unlock;
8652 }
8653
8654 if (adv_busy(hdev)) {
8655 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8656 MGMT_STATUS_BUSY);
8657 goto unlock;
8658 }
8659
8660 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8661 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8662 cp->scan_rsp_len, false)) {
8663 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8664 MGMT_STATUS_INVALID_PARAMS);
8665 goto unlock;
8666 }
8667
8668 prev_instance_cnt = hdev->adv_instance_cnt;
8669
8670 adv = hci_add_adv_instance(hdev, cp->instance, flags,
8671 cp->adv_data_len, cp->data,
8672 cp->scan_rsp_len,
8673 cp->data + cp->adv_data_len,
8674 timeout, duration,
8675 HCI_ADV_TX_POWER_NO_PREFERENCE,
8676 hdev->le_adv_min_interval,
8677 hdev->le_adv_max_interval, 0);
8678 if (IS_ERR(adv)) {
8679 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8680 MGMT_STATUS_FAILED);
8681 goto unlock;
8682 }
8683
8684 /* Only trigger an advertising added event if a new instance was
8685 * actually added.
8686 */
8687 if (hdev->adv_instance_cnt > prev_instance_cnt)
8688 mgmt_advertising_added(sk, hdev, cp->instance);
8689
8690 if (hdev->cur_adv_instance == cp->instance) {
8691 /* If the currently advertised instance is being changed then
8692 * cancel the current advertising and schedule the next
8693 * instance. If there is only one instance then the overridden
8694 * advertising data will be visible right away.
8695 */
8696 cancel_adv_timeout(hdev);
8697
8698 next_instance = hci_get_next_instance(hdev, cp->instance);
8699 if (next_instance)
8700 schedule_instance = next_instance->instance;
8701 } else if (!hdev->adv_instance_timeout) {
8702 /* Immediately advertise the new instance if no other
8703 * instance is currently being advertised.
8704 */
8705 schedule_instance = cp->instance;
8706 }
8707
8708 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8709 * there is no instance to be advertised then we have no HCI
8710 * communication to make. Simply return.
8711 */
8712 if (!hdev_is_powered(hdev) ||
8713 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8714 !schedule_instance) {
8715 rp.instance = cp->instance;
8716 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8717 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8718 goto unlock;
8719 }
8720
8721 /* We're good to go, update advertising data, parameters, and start
8722 * advertising.
8723 */
8724 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8725 data_len);
8726 if (!cmd) {
8727 err = -ENOMEM;
8728 goto unlock;
8729 }
8730
8731 cp->instance = schedule_instance;
8732
8733 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8734 add_advertising_complete);
8735 if (err < 0)
8736 mgmt_pending_free(cmd);
8737
8738 unlock:
8739 hci_dev_unlock(hdev);
8740
8741 return err;
8742 }
8743
8744 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8745 int err)
8746 {
8747 struct mgmt_pending_cmd *cmd = data;
8748 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8749 struct mgmt_rp_add_ext_adv_params rp;
8750 struct adv_info *adv;
8751 u32 flags;
8752
8753 BT_DBG("%s", hdev->name);
8754
8755 hci_dev_lock(hdev);
8756
8757 adv = hci_find_adv_instance(hdev, cp->instance);
8758 if (!adv)
8759 goto unlock;
8760
8761 rp.instance = cp->instance;
8762 rp.tx_power = adv->tx_power;
8763
8764 /* While we're at it, inform userspace of the available space for this
8765 * advertisement, given the flags that will be used.
8766 */
8767 flags = __le32_to_cpu(cp->flags);
8768 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8769 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8770
8771 if (err) {
8772 /* If this advertisement was previously advertising and we
8773 * failed to update it, we signal that it has been removed and
8774 * delete its structure.
8775 */
8776 if (!adv->pending)
8777 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8778
8779 hci_remove_adv_instance(hdev, cp->instance);
8780
8781 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
8782 mgmt_status(err));
8783 } else {
8784 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
8785 mgmt_status(err), &rp, sizeof(rp));
8786 }
8787
8788 unlock:
8789 mgmt_pending_free(cmd);
8790
8791 hci_dev_unlock(hdev);
8792 }
8793
8794 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8795 {
8796 struct mgmt_pending_cmd *cmd = data;
8797 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8798
8799 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8800 }
8801
8802 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8803 void *data, u16 data_len)
8804 {
8805 struct mgmt_cp_add_ext_adv_params *cp = data;
8806 struct mgmt_rp_add_ext_adv_params rp;
8807 struct mgmt_pending_cmd *cmd = NULL;
8808 struct adv_info *adv;
8809 u32 flags, min_interval, max_interval;
8810 u16 timeout, duration;
8811 u8 status;
8812 s8 tx_power;
8813 int err;
8814
8815 BT_DBG("%s", hdev->name);
8816
8817 status = mgmt_le_support(hdev);
8818 if (status)
8819 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8820 status);
8821
8822 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8823 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8824 MGMT_STATUS_INVALID_PARAMS);
8825
8826 /* The purpose of breaking add_advertising into two separate MGMT calls
8827 * for params and data is to allow more parameters to be added to this
8828 * structure in the future. For this reason, we verify that we have the
8829 * bare minimum structure we know of when the interface was defined. Any
8830 * extra parameters we don't know about will be ignored in this request.
8831 */
8832 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8833 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8834 MGMT_STATUS_INVALID_PARAMS);
8835
8836 flags = __le32_to_cpu(cp->flags);
8837
8838 if (!requested_adv_flags_are_valid(hdev, flags))
8839 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8840 MGMT_STATUS_INVALID_PARAMS);
8841
8842 hci_dev_lock(hdev);
8843
8844 /* In the new interface, we require that we are powered to register */
8845 if (!hdev_is_powered(hdev)) {
8846 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8847 MGMT_STATUS_REJECTED);
8848 goto unlock;
8849 }
8850
8851 if (adv_busy(hdev)) {
8852 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8853 MGMT_STATUS_BUSY);
8854 goto unlock;
8855 }
8856
8857 /* Parse defined parameters from request, use defaults otherwise */
8858 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8859 __le16_to_cpu(cp->timeout) : 0;
8860
8861 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8862 __le16_to_cpu(cp->duration) :
8863 hdev->def_multi_adv_rotation_duration;
8864
8865 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8866 __le32_to_cpu(cp->min_interval) :
8867 hdev->le_adv_min_interval;
8868
8869 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8870 __le32_to_cpu(cp->max_interval) :
8871 hdev->le_adv_max_interval;
8872
8873 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8874 cp->tx_power :
8875 HCI_ADV_TX_POWER_NO_PREFERENCE;
8876
8877 /* Create advertising instance with no advertising or response data */
8878 adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
8879 timeout, duration, tx_power, min_interval,
8880 max_interval, 0);
8881
8882 if (IS_ERR(adv)) {
8883 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8884 MGMT_STATUS_FAILED);
8885 goto unlock;
8886 }
8887
8888 /* Submit request for advertising params if ext adv available */
8889 if (ext_adv_capable(hdev)) {
8890 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8891 data, data_len);
8892 if (!cmd) {
8893 err = -ENOMEM;
8894 hci_remove_adv_instance(hdev, cp->instance);
8895 goto unlock;
8896 }
8897
8898 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8899 add_ext_adv_params_complete);
8900 if (err < 0)
8901 mgmt_pending_free(cmd);
8902 } else {
8903 rp.instance = cp->instance;
8904 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8905 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8906 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8907 err = mgmt_cmd_complete(sk, hdev->id,
8908 MGMT_OP_ADD_EXT_ADV_PARAMS,
8909 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8910 }
8911
8912 unlock:
8913 hci_dev_unlock(hdev);
8914
8915 return err;
8916 }
8917
8918 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8919 {
8920 struct mgmt_pending_cmd *cmd = data;
8921 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8922 struct mgmt_rp_add_advertising rp;
8923
8924 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8925
8926 memset(&rp, 0, sizeof(rp));
8927
8928 rp.instance = cp->instance;
8929
8930 if (err)
8931 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
8932 mgmt_status(err));
8933 else
8934 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
8935 mgmt_status(err), &rp, sizeof(rp));
8936
8937 mgmt_pending_free(cmd);
8938 }
8939
8940 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8941 {
8942 struct mgmt_pending_cmd *cmd = data;
8943 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8944 int err;
8945
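/* With extended advertising, update the advertising data, the scan
 * response and the enable state as separate steps; legacy controllers
 * go through the software instance scheduler instead.
 */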
8946 if (ext_adv_capable(hdev)) {
8947 err = hci_update_adv_data_sync(hdev, cp->instance);
8948 if (err)
8949 return err;
8950
8951 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8952 if (err)
8953 return err;
8954
8955 return hci_enable_ext_advertising_sync(hdev, cp->instance);
8956 }
8957
8958 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8959 }
8960
8961 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
8962 u16 data_len)
8963 {
8964 struct mgmt_cp_add_ext_adv_data *cp = data;
8965 struct mgmt_rp_add_ext_adv_data rp;
8966 u8 schedule_instance = 0;
8967 struct adv_info *next_instance;
8968 struct adv_info *adv_instance;
8969 int err = 0;
8970 struct mgmt_pending_cmd *cmd;
8971
8972 BT_DBG("%s", hdev->name);
8973
8974 hci_dev_lock(hdev);
8975
8976 adv_instance = hci_find_adv_instance(hdev, cp->instance);
8977
8978 if (!adv_instance) {
8979 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8980 MGMT_STATUS_INVALID_PARAMS);
8981 goto unlock;
8982 }
8983
8984 /* In the new interface, we require that we are powered to register */
8985 if (!hdev_is_powered(hdev)) {
8986 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8987 MGMT_STATUS_REJECTED);
8988 goto clear_new_instance;
8989 }
8990
8991 if (adv_busy(hdev)) {
8992 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8993 MGMT_STATUS_BUSY);
8994 goto clear_new_instance;
8995 }
8996
8997 /* Validate new data */
8998 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
8999 cp->adv_data_len, true) ||
9000 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
9001 cp->adv_data_len, cp->scan_rsp_len, false)) {
9002 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9003 MGMT_STATUS_INVALID_PARAMS);
9004 goto clear_new_instance;
9005 }
9006
9007 /* Set the data in the advertising instance */
9008 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
9009 cp->data, cp->scan_rsp_len,
9010 cp->data + cp->adv_data_len);
9011
9012 /* If using software rotation, determine next instance to use */
9013 if (hdev->cur_adv_instance == cp->instance) {
9014 /* If the currently advertised instance is being changed
9015 * then cancel the current advertising and schedule the
9016 * next instance. If there is only one instance then the
9017 * overridden advertising data will be visible right
9018 * away
9019 */
9020 cancel_adv_timeout(hdev);
9021
9022 next_instance = hci_get_next_instance(hdev, cp->instance);
9023 if (next_instance)
9024 schedule_instance = next_instance->instance;
9025 } else if (!hdev->adv_instance_timeout) {
9026 /* Immediately advertise the new instance if no other
9027 * instance is currently being advertised.
9028 */
9029 schedule_instance = cp->instance;
9030 }
9031
9032 /* If the HCI_ADVERTISING flag is set or there is no instance to
9033 * be advertised then we have no HCI communication to make.
9034 * Simply return.
9035 */
9036 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
9037 if (adv_instance->pending) {
9038 mgmt_advertising_added(sk, hdev, cp->instance);
9039 adv_instance->pending = false;
9040 }
9041 rp.instance = cp->instance;
9042 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9043 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9044 goto unlock;
9045 }
9046
9047 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9048 data_len);
9049 if (!cmd) {
9050 err = -ENOMEM;
9051 goto clear_new_instance;
9052 }
9053
9054 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
9055 add_ext_adv_data_complete);
9056 if (err < 0) {
9057 mgmt_pending_free(cmd);
9058 goto clear_new_instance;
9059 }
9060
9061 /* We were successful in updating data, so trigger advertising_added
9062 * event if this is an instance that wasn't previously advertising. If
9063 * a failure occurs in the requests we initiated, we will remove the
9064 * instance again in add_advertising_complete
9065 */
9066 if (adv_instance->pending)
9067 mgmt_advertising_added(sk, hdev, cp->instance);
9068
9069 goto unlock;
9070
9071 clear_new_instance:
9072 hci_remove_adv_instance(hdev, cp->instance);
9073
9074 unlock:
9075 hci_dev_unlock(hdev);
9076
9077 return err;
9078 }
9079
9080 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9081 int err)
9082 {
9083 struct mgmt_pending_cmd *cmd = data;
9084 struct mgmt_cp_remove_advertising *cp = cmd->param;
9085 struct mgmt_rp_remove_advertising rp;
9086
9087 bt_dev_dbg(hdev, "err %d", err);
9088
9089 memset(&rp, 0, sizeof(rp));
9090 rp.instance = cp->instance;
9091
9092 if (err)
9093 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
9094 mgmt_status(err));
9095 else
9096 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
9097 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9098
9099 mgmt_pending_free(cmd);
9100 }
9101
9102 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9103 {
9104 struct mgmt_pending_cmd *cmd = data;
9105 struct mgmt_cp_remove_advertising *cp = cmd->param;
9106 int err;
9107
9108 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9109 if (err)
9110 return err;
9111
9112 if (list_empty(&hdev->adv_instances))
9113 err = hci_disable_advertising_sync(hdev);
9114
9115 return err;
9116 }
9117
9118 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9119 void *data, u16 data_len)
9120 {
9121 struct mgmt_cp_remove_advertising *cp = data;
9122 struct mgmt_pending_cmd *cmd;
9123 int err;
9124
9125 bt_dev_dbg(hdev, "sock %p", sk);
9126
9127 hci_dev_lock(hdev);
9128
9129 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9130 err = mgmt_cmd_status(sk, hdev->id,
9131 MGMT_OP_REMOVE_ADVERTISING,
9132 MGMT_STATUS_INVALID_PARAMS);
9133 goto unlock;
9134 }
9135
9136 if (pending_find(MGMT_OP_SET_LE, hdev)) {
9137 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9138 MGMT_STATUS_BUSY);
9139 goto unlock;
9140 }
9141
9142 if (list_empty(&hdev->adv_instances)) {
9143 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9144 MGMT_STATUS_INVALID_PARAMS);
9145 goto unlock;
9146 }
9147
9148 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9149 data_len);
9150 if (!cmd) {
9151 err = -ENOMEM;
9152 goto unlock;
9153 }
9154
9155 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
9156 remove_advertising_complete);
9157 if (err < 0)
9158 mgmt_pending_free(cmd);
9159
9160 unlock:
9161 hci_dev_unlock(hdev);
9162
9163 return err;
9164 }
9165
9166 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9167 void *data, u16 data_len)
9168 {
9169 struct mgmt_cp_get_adv_size_info *cp = data;
9170 struct mgmt_rp_get_adv_size_info rp;
9171 u32 flags, supported_flags;
9172
9173 bt_dev_dbg(hdev, "sock %p", sk);
9174
9175 if (!lmp_le_capable(hdev))
9176 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9177 MGMT_STATUS_REJECTED);
9178
9179 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9180 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9181 MGMT_STATUS_INVALID_PARAMS);
9182
9183 flags = __le32_to_cpu(cp->flags);
9184
9185 /* The current implementation only supports a subset of the specified
9186 * flags.
9187 */
9188 supported_flags = get_supported_adv_flags(hdev);
9189 if (flags & ~supported_flags)
9190 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9191 MGMT_STATUS_INVALID_PARAMS);
9192
9193 rp.instance = cp->instance;
9194 rp.flags = cp->flags;
9195 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9196 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9197
9198 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9199 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9200 }
9201
9202 static const struct hci_mgmt_handler mgmt_handlers[] = {
9203 { NULL }, /* 0x0000 (no command) */
9204 { read_version, MGMT_READ_VERSION_SIZE,
9205 HCI_MGMT_NO_HDEV |
9206 HCI_MGMT_UNTRUSTED },
9207 { read_commands, MGMT_READ_COMMANDS_SIZE,
9208 HCI_MGMT_NO_HDEV |
9209 HCI_MGMT_UNTRUSTED },
9210 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
9211 HCI_MGMT_NO_HDEV |
9212 HCI_MGMT_UNTRUSTED },
9213 { read_controller_info, MGMT_READ_INFO_SIZE,
9214 HCI_MGMT_UNTRUSTED },
9215 { set_powered, MGMT_SETTING_SIZE },
9216 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
9217 { set_connectable, MGMT_SETTING_SIZE },
9218 { set_fast_connectable, MGMT_SETTING_SIZE },
9219 { set_bondable, MGMT_SETTING_SIZE },
9220 { set_link_security, MGMT_SETTING_SIZE },
9221 { set_ssp, MGMT_SETTING_SIZE },
9222 { set_hs, MGMT_SETTING_SIZE },
9223 { set_le, MGMT_SETTING_SIZE },
9224 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
9225 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
9226 { add_uuid, MGMT_ADD_UUID_SIZE },
9227 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
9228 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
9229 HCI_MGMT_VAR_LEN },
9230 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
9231 HCI_MGMT_VAR_LEN },
9232 { disconnect, MGMT_DISCONNECT_SIZE },
9233 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
9234 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
9235 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
9236 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
9237 { pair_device, MGMT_PAIR_DEVICE_SIZE },
9238 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
9239 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
9240 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
9241 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
9242 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
9243 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
9244 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
9245 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
9246 HCI_MGMT_VAR_LEN },
9247 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
9248 { start_discovery, MGMT_START_DISCOVERY_SIZE },
9249 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
9250 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
9251 { block_device, MGMT_BLOCK_DEVICE_SIZE },
9252 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
9253 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
9254 { set_advertising, MGMT_SETTING_SIZE },
9255 { set_bredr, MGMT_SETTING_SIZE },
9256 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
9257 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
9258 { set_secure_conn, MGMT_SETTING_SIZE },
9259 { set_debug_keys, MGMT_SETTING_SIZE },
9260 { set_privacy, MGMT_SET_PRIVACY_SIZE },
9261 { load_irks, MGMT_LOAD_IRKS_SIZE,
9262 HCI_MGMT_VAR_LEN },
9263 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
9264 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
9265 { add_device, MGMT_ADD_DEVICE_SIZE },
9266 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
9267 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
9268 HCI_MGMT_VAR_LEN },
9269 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
9270 HCI_MGMT_NO_HDEV |
9271 HCI_MGMT_UNTRUSTED },
9272 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
9273 HCI_MGMT_UNCONFIGURED |
9274 HCI_MGMT_UNTRUSTED },
9275 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
9276 HCI_MGMT_UNCONFIGURED },
9277 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
9278 HCI_MGMT_UNCONFIGURED },
9279 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
9280 HCI_MGMT_VAR_LEN },
9281 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
9282 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
9283 HCI_MGMT_NO_HDEV |
9284 HCI_MGMT_UNTRUSTED },
9285 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
9286 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
9287 HCI_MGMT_VAR_LEN },
9288 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
9289 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
9290 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
9291 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
9292 HCI_MGMT_UNTRUSTED },
9293 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
9294 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
9295 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
9296 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
9297 HCI_MGMT_VAR_LEN },
9298 { set_wideband_speech, MGMT_SETTING_SIZE },
9299 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
9300 HCI_MGMT_UNTRUSTED },
9301 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
9302 HCI_MGMT_UNTRUSTED |
9303 HCI_MGMT_HDEV_OPTIONAL },
9304 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
9305 HCI_MGMT_VAR_LEN |
9306 HCI_MGMT_HDEV_OPTIONAL },
9307 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
9308 HCI_MGMT_UNTRUSTED },
9309 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
9310 HCI_MGMT_VAR_LEN },
9311 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
9312 HCI_MGMT_UNTRUSTED },
9313 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
9314 HCI_MGMT_VAR_LEN },
9315 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
9316 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
9317 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
9318 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
9319 HCI_MGMT_VAR_LEN },
9320 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
9321 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
9322 HCI_MGMT_VAR_LEN },
9323 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
9324 HCI_MGMT_VAR_LEN },
9325 { add_adv_patterns_monitor_rssi,
9326 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
9327 HCI_MGMT_VAR_LEN },
9328 { set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
9329 HCI_MGMT_VAR_LEN },
9330 { mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
9331 { mesh_send, MGMT_MESH_SEND_SIZE,
9332 HCI_MGMT_VAR_LEN },
9333 { mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
9334 { mgmt_hci_cmd_sync, MGMT_HCI_CMD_SYNC_SIZE, HCI_MGMT_VAR_LEN },
9335 };
9336
9337 void mgmt_index_added(struct hci_dev *hdev)
9338 {
9339 struct mgmt_ev_ext_index ev;
9340
9341 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9342 return;
9343
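/* The Extended Index event carries a controller type: 0x00 for a
 * regular (configured) controller, 0x01 for an unconfigured one.
 */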
9344 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9345 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
9346 HCI_MGMT_UNCONF_INDEX_EVENTS);
9347 ev.type = 0x01;
9348 } else {
9349 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9350 HCI_MGMT_INDEX_EVENTS);
9351 ev.type = 0x00;
9352 }
9353
9354 ev.bus = hdev->bus;
9355
9356 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9357 HCI_MGMT_EXT_INDEX_EVENTS);
9358 }
9359
9360 void mgmt_index_removed(struct hci_dev *hdev)
9361 {
9362 struct mgmt_ev_ext_index ev;
9363 struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };
9364
9365 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9366 return;
9367
9368 mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);
9369
9370 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9371 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
9372 HCI_MGMT_UNCONF_INDEX_EVENTS);
9373 ev.type = 0x01;
9374 } else {
9375 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9376 HCI_MGMT_INDEX_EVENTS);
9377 ev.type = 0x00;
9378 }
9379
9380 ev.bus = hdev->bus;
9381
9382 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9383 HCI_MGMT_EXT_INDEX_EVENTS);
9384
9385 /* Cancel any remaining timed work */
9386 if (!hci_dev_test_flag(hdev, HCI_MGMT))
9387 return;
9388 cancel_delayed_work_sync(&hdev->discov_off);
9389 cancel_delayed_work_sync(&hdev->service_cache);
9390 cancel_delayed_work_sync(&hdev->rpa_expired);
9391 }
9392
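/* Called once a power-on request has completed. On success, re-arm any
 * stored LE connection actions and the passive scan, then answer all
 * pending Set Powered commands and emit New Settings.
 */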
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}

void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
			     &match);

	/* If the power off is because of hdev unregistration let us
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
	else
		match.mgmt_status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct mgmt_pending_cmd *cmd;
	u8 status;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

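/* Forward a newly created BR/EDR link key to userspace. The store_hint
 * tells userspace whether the key should be kept persistently.
 */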
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

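/* Map an SMP long term key to its mgmt key type, distinguishing legacy
 * and P-256 keys and whether the pairing that produced them was
 * authenticated.
 */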
static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_RESPONDER:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}

	return MGMT_LTK_UNAUTHENTICATED;
}

void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * long term keys to be stored. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * should the long term key be stored. If the remote identity
	 * is known, the long term keys are internally mapped to the
	 * identity address. So allow static random and public
	 * addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * signature resolving keys to be stored. Their addresses will
	 * change the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * should the signature resolving key be stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

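/* Send a Device Connected event exactly once per connection, guarded
 * by the HCI_CONN_MGMT_CONNECTED flag. For LE links the received
 * advertising data is attached verbatim; for BR/EDR the remote name
 * and class of device are packed as EIR fields instead.
 */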
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* Allocate a buffer for the LE advertising data or BR/EDR EIR */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) +
				     (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE,
						    name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class,
						    sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}

static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
}

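/* Return true if the controller is powering down, either because the
 * HCI_POWERING_DOWN flag is set or because a pending Set Powered
 * command is about to turn the controller off.
 */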
bool mgmt_powering_down(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
		return true;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

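/* Report a disconnection to userspace, rewriting the reason to
 * "local host suspend" while the controller is suspending so that
 * userspace can tell these apart from ordinary disconnects.
 */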
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, true,
			     unpair_device_rsp, hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
					 conn->dst_type, status, true);
		return;
	}

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

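/* Stash the socket of the first matching pending command in the
 * cmd_lookup, taking a reference that the caller drops with
 * sock_put() once the resulting events have been sent.
 */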
static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, false, sk_lookup,
			     &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, false, sk_lookup,
			     &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, false, sk_lookup,
			     &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS,
				   NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering the HCI
		 * dev on or off, don't send any mgmt signals.
		 */
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
			return;

		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}

static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}

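/* Walk EIR/advertising data (length-prefixed fields) and match every
 * listed UUID against the filter list. 16-bit and 32-bit UUIDs are
 * expanded to 128 bits by inserting them into the Bluetooth Base UUID
 * before comparison.
 */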
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}

static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If an RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * an RSSI smaller than the RSSI threshold will be dropped. If the
	 * quirk is set, let it through for further processing, as we might
	 * need to restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in the filter, results with
		 * no matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated results with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}

void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.monitor_handle = cpu_to_le16(handle);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}

static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to the DEVICE_FOUND event
	 * except that it also has 'monitor_handle'. Make a copy of
	 * DEVICE_FOUND and store the monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}

static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitors
	 *
	 * For cases 1 and 2, report all advertisements via
	 * MGMT_EV_DEVICE_FOUND and report ONLY one advertisement per device
	 * for the matched Monitor via MGMT_EV_ADV_MONITOR_DEVICE_FOUND.
	 *
	 * For case 3, since we are not actively scanning and all
	 * advertisements received are due to a matched Advertisement Monitor,
	 * report all advertisements ONLY via
	 * MGMT_EV_ADV_MONITOR_DEVICE_FOUND.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not actively scanning and
		 * this is a subsequent advertisement report for an already
		 * matched Advertisement Monitor, or the controller offloading
		 * support is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}

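/* Deliver an advertising report to mesh listeners. If AD type filters
 * have been configured (via the Set Mesh Receiver command), the report
 * is dropped unless the advertising or scan response data contains at
 * least one of the requested AD types.
 */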
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}

void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);

			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}

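/* Report the result of a remote name request as a Device Found event.
 * The resolved name is appended as an EIR Complete Local Name field;
 * if no name could be obtained, the NAME_REQUEST_FAILED flag is set
 * instead.
 */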
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) +
			     (name ? eir_precalc_len(name_len) : 0));
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name,
					    name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}

void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	} else {
		memset(&ev.addr, 0, sizeof(ev.addr));
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}

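/* The mgmt interface is registered as the HCI control channel, with
 * the mgmt_handlers table above providing the per-opcode handler and
 * expected parameter size for every supported command.
 */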
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};

int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}

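/* Called when a mgmt socket is being destroyed: abort any outstanding
 * mesh transmissions queued by that socket on any controller.
 */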
void mgmt_cleanup(struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, true);
		} while (mesh_tx);
	}

	read_unlock(&hci_dev_list_lock);
}
