/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010 Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <linux/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
#include "aosp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	23

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
	MGMT_OP_HCI_CMD_SYNC,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};

static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};

#define CACHE_TIMEOUT	secs_to_jiffies(2)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

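/* Map a Linux errno value onto the closest MGMT status code; anything
 * without an explicit mapping falls back to MGMT_STATUS_FAILED.
 */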
static u8 mgmt_errno_status(int err)
{
	switch (err) {
	case 0:
		return MGMT_STATUS_SUCCESS;
	case -EPERM:
		return MGMT_STATUS_REJECTED;
	case -EINVAL:
		return MGMT_STATUS_INVALID_PARAMS;
	case -EOPNOTSUPP:
		return MGMT_STATUS_NOT_SUPPORTED;
	case -EBUSY:
		return MGMT_STATUS_BUSY;
	case -ETIMEDOUT:
		return MGMT_STATUS_AUTH_FAILED;
	case -ENOMEM:
		return MGMT_STATUS_NO_RESOURCES;
	case -EISCONN:
		return MGMT_STATUS_ALREADY_CONNECTED;
	case -ENOTCONN:
		return MGMT_STATUS_DISCONNECTED;
	}

	return MGMT_STATUS_FAILED;
}

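/* Convert an error into an MGMT status: negative values are treated as
 * Linux errno codes, non-negative values as HCI status codes that are
 * looked up in mgmt_status_table above.
 */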
static u8 mgmt_status(int err)
{
	if (err < 0)
		return mgmt_errno_status(err);

	if (err < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[err];

	return MGMT_STATUS_FAILED;
}

static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}

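/* Translate an MGMT address type into the core LE address type; any
 * non-public type is treated as a random address.
 */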
static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;
	else
		return ADDR_LE_DEV_RANDOM;
}

void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}

static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}

static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}

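/* A controller counts as configured once any required external
 * configuration has completed and, where the quirks demand it, a valid
 * public address has been set.
 */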
static bool is_configured(struct hci_dev *hdev)
{
	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if ((hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) ||
	     hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

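/* Report which configuration options still need to be provided before
 * the controller is fully configured, as a little-endian option mask.
 */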
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) ||
	     hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}

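/* Build the mask of PHYs the controller supports, derived from the
 * BR/EDR LMP feature bits and the LE feature page.
 */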
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}

static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}

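/* BR/EDR 1M 1-slot and LE 1M TX/RX are always enabled and cannot be
 * deselected, so they are excluded from the configurable mask.
 */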
static u32 get_configurable_phys(struct hci_dev *hdev)
{
	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}

static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev))
			settings |= MGMT_SETTING_SSP;

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (hci_test_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) || hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (ll_privacy_capable(hdev))
		settings |= MGMT_SETTING_LL_PRIVACY;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then whether
	 * it is actually in use decides if the flag is set or not.
	 *
	 * For single-mode LE-only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	if (ll_privacy_capable(hdev))
		settings |= MGMT_SETTING_LL_PRIVACY;

	return settings;
}

static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}

bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}

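/* Push the cached EIR data and class of device to the controller once
 * the service cache timer fires.
 */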
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}

static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}

static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}

static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data);

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);

static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}

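/* Mesh transmission finished: stop advertising when no advertising
 * instances remain and complete the next queued mesh packet.
 */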
static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	if (list_empty(&hdev->adv_instances))
		hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}

static int mesh_send_sync(struct hci_dev *hdev, void *data);
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}

static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}

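/* One-time setup performed when the first mgmt command arrives for a
 * controller: initialize the delayed work handlers and mark the device
 * as mgmt controlled.
 */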
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them. However,
	 * for mgmt we require user-space to explicitly enable
	 * it.
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}

static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}

static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}

static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}

void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

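/* Disarm a pending advertising instance timeout, if one is armed. */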
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for the AUTO_OFF case where the device might
		 * not "really" have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}

static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_settings() for power on, as power off is
		 * deferred to hdev->power_off work which does call
		 * hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}

static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return -ECANCELED;

	cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}

static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!cp->val) {
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
					      MGMT_STATUS_BUSY);
			goto failed;
		}
	}

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

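/* Respond to a pending command with the current settings and remember
 * the first originating socket (with a reference held) so callers can
 * skip it when emitting new-settings events.
 */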
settings_rsp(struct mgmt_pending_cmd * cmd,void * data)1443 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1444 {
1445 struct cmd_lookup *match = data;
1446
1447 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1448
1449 if (match->sk == NULL) {
1450 match->sk = cmd->sk;
1451 sock_hold(match->sk);
1452 }
1453 }
1454
cmd_status_rsp(struct mgmt_pending_cmd * cmd,void * data)1455 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1456 {
1457 u8 *status = data;
1458
1459 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, *status);
1460 }
1461
cmd_complete_rsp(struct mgmt_pending_cmd * cmd,void * data)1462 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1463 {
1464 struct cmd_lookup *match = data;
1465
1466 /* dequeue cmd_sync entries using cmd as data as that is about to be
1467 * removed/freed.
1468 */
1469 hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);
1470
1471 if (cmd->cmd_complete) {
1472 cmd->cmd_complete(cmd, match->mgmt_status);
1473 return;
1474 }
1475
1476 cmd_status_rsp(cmd, data);
1477 }
1478
generic_cmd_complete(struct mgmt_pending_cmd * cmd,u8 status)1479 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1480 {
1481 return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
1482 cmd->param, cmd->param_len);
1483 }
1484
addr_cmd_complete(struct mgmt_pending_cmd * cmd,u8 status)1485 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1486 {
1487 return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
1488 cmd->param, sizeof(struct mgmt_addr_info));
1489 }
1490
mgmt_bredr_support(struct hci_dev * hdev)1491 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1492 {
1493 if (!lmp_bredr_capable(hdev))
1494 return MGMT_STATUS_NOT_SUPPORTED;
1495 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1496 return MGMT_STATUS_REJECTED;
1497 else
1498 return MGMT_STATUS_SUCCESS;
1499 }
1500
mgmt_le_support(struct hci_dev * hdev)1501 static u8 mgmt_le_support(struct hci_dev *hdev)
1502 {
1503 if (!lmp_le_capable(hdev))
1504 return MGMT_STATUS_NOT_SUPPORTED;
1505 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1506 return MGMT_STATUS_REJECTED;
1507 else
1508 return MGMT_STATUS_SUCCESS;
1509 }
1510
mgmt_set_discoverable_complete(struct hci_dev * hdev,void * data,int err)1511 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1512 int err)
1513 {
1514 struct mgmt_pending_cmd *cmd = data;
1515
1516 bt_dev_dbg(hdev, "err %d", err);
1517
1518 /* Make sure cmd still outstanding. */
1519 if (err == -ECANCELED ||
1520 cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1521 return;
1522
1523 hci_dev_lock(hdev);
1524
1525 if (err) {
1526 u8 mgmt_err = mgmt_status(err);
1527 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
1528 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1529 goto done;
1530 }
1531
1532 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1533 hdev->discov_timeout > 0) {
1534 int to = secs_to_jiffies(hdev->discov_timeout);
1535 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1536 }
1537
1538 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1539 new_settings(hdev, cmd->sk);
1540
1541 done:
1542 mgmt_pending_remove(cmd);
1543 hci_dev_unlock(hdev);
1544 }
1545
set_discoverable_sync(struct hci_dev * hdev,void * data)1546 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1547 {
1548 BT_DBG("%s", hdev->name);
1549
1550 return hci_update_discoverable_sync(hdev);
1551 }
1552
set_discoverable(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)1553 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1554 u16 len)
1555 {
1556 struct mgmt_cp_set_discoverable *cp = data;
1557 struct mgmt_pending_cmd *cmd;
1558 u16 timeout;
1559 int err;
1560
1561 bt_dev_dbg(hdev, "sock %p", sk);
1562
1563 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1564 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1565 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1566 MGMT_STATUS_REJECTED);
1567
1568 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1569 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1570 MGMT_STATUS_INVALID_PARAMS);
1571
1572 timeout = __le16_to_cpu(cp->timeout);
1573
1574 /* Disabling discoverable requires that no timeout is set,
1575 * and enabling limited discoverable requires a timeout.
1576 */
1577 if ((cp->val == 0x00 && timeout > 0) ||
1578 (cp->val == 0x02 && timeout == 0))
1579 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1580 MGMT_STATUS_INVALID_PARAMS);
1581
1582 hci_dev_lock(hdev);
1583
1584 if (!hdev_is_powered(hdev) && timeout > 0) {
1585 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1586 MGMT_STATUS_NOT_POWERED);
1587 goto failed;
1588 }
1589
1590 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1591 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1592 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1593 MGMT_STATUS_BUSY);
1594 goto failed;
1595 }
1596
1597 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1598 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1599 MGMT_STATUS_REJECTED);
1600 goto failed;
1601 }
1602
1603 if (hdev->advertising_paused) {
1604 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1605 MGMT_STATUS_BUSY);
1606 goto failed;
1607 }
1608
1609 if (!hdev_is_powered(hdev)) {
1610 bool changed = false;
1611
1612 /* Setting limited discoverable when powered off is
1613 * not a valid operation since it requires a timeout
1614 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1615 */
1616 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1617 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1618 changed = true;
1619 }
1620
1621 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1622 if (err < 0)
1623 goto failed;
1624
1625 if (changed)
1626 err = new_settings(hdev, sk);
1627
1628 goto failed;
1629 }
1630
1631 /* If the current mode is the same, then just update the timeout
1632 * value with the new value. And if only the timeout gets updated,
1633 * then no need for any HCI transactions.
1634 */
1635 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1636 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1637 HCI_LIMITED_DISCOVERABLE)) {
1638 cancel_delayed_work(&hdev->discov_off);
1639 hdev->discov_timeout = timeout;
1640
1641 if (cp->val && hdev->discov_timeout > 0) {
1642 int to = secs_to_jiffies(hdev->discov_timeout);
1643 queue_delayed_work(hdev->req_workqueue,
1644 &hdev->discov_off, to);
1645 }
1646
1647 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1648 goto failed;
1649 }
1650
1651 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1652 if (!cmd) {
1653 err = -ENOMEM;
1654 goto failed;
1655 }
1656
1657 /* Cancel any potential discoverable timeout that might be
1658 * still active and store new timeout value. The arming of
1659 * the timeout happens in the complete handler.
1660 */
1661 cancel_delayed_work(&hdev->discov_off);
1662 hdev->discov_timeout = timeout;
1663
1664 if (cp->val)
1665 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1666 else
1667 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1668
1669 /* Limited discoverable mode */
1670 if (cp->val == 0x02)
1671 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1672 else
1673 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1674
1675 err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1676 mgmt_set_discoverable_complete);
1677
1678 if (err < 0)
1679 mgmt_pending_remove(cmd);
1680
1681 failed:
1682 hci_dev_unlock(hdev);
1683 return err;
1684 }
1685
mgmt_set_connectable_complete(struct hci_dev * hdev,void * data,int err)1686 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1687 int err)
1688 {
1689 struct mgmt_pending_cmd *cmd = data;
1690
1691 bt_dev_dbg(hdev, "err %d", err);
1692
1693 /* Make sure cmd still outstanding. */
1694 if (err == -ECANCELED ||
1695 cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1696 return;
1697
1698 hci_dev_lock(hdev);
1699
1700 if (err) {
1701 u8 mgmt_err = mgmt_status(err);
1702 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
1703 goto done;
1704 }
1705
1706 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1707 new_settings(hdev, cmd->sk);
1708
1709 done:
1710 mgmt_pending_remove(cmd);
1711
1712 hci_dev_unlock(hdev);
1713 }
1714
set_connectable_update_settings(struct hci_dev * hdev,struct sock * sk,u8 val)1715 static int set_connectable_update_settings(struct hci_dev *hdev,
1716 struct sock *sk, u8 val)
1717 {
1718 bool changed = false;
1719 int err;
1720
1721 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1722 changed = true;
1723
1724 if (val) {
1725 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1726 } else {
1727 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1728 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1729 }
1730
1731 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1732 if (err < 0)
1733 return err;
1734
1735 if (changed) {
1736 hci_update_scan(hdev);
1737 hci_update_passive_scan(hdev);
1738 return new_settings(hdev, sk);
1739 }
1740
1741 return 0;
1742 }
1743
set_connectable_sync(struct hci_dev * hdev,void * data)1744 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1745 {
1746 BT_DBG("%s", hdev->name);
1747
1748 return hci_update_connectable_sync(hdev);
1749 }
1750
set_connectable(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)1751 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1752 u16 len)
1753 {
1754 struct mgmt_mode *cp = data;
1755 struct mgmt_pending_cmd *cmd;
1756 int err;
1757
1758 bt_dev_dbg(hdev, "sock %p", sk);
1759
1760 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1761 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1762 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1763 MGMT_STATUS_REJECTED);
1764
1765 if (cp->val != 0x00 && cp->val != 0x01)
1766 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1767 MGMT_STATUS_INVALID_PARAMS);
1768
1769 hci_dev_lock(hdev);
1770
1771 if (!hdev_is_powered(hdev)) {
1772 err = set_connectable_update_settings(hdev, sk, cp->val);
1773 goto failed;
1774 }
1775
1776 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1777 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1778 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1779 MGMT_STATUS_BUSY);
1780 goto failed;
1781 }
1782
1783 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1784 if (!cmd) {
1785 err = -ENOMEM;
1786 goto failed;
1787 }
1788
1789 if (cp->val) {
1790 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1791 } else {
1792 if (hdev->discov_timeout > 0)
1793 cancel_delayed_work(&hdev->discov_off);
1794
1795 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1796 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1797 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1798 }
1799
1800 err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1801 mgmt_set_connectable_complete);
1802
1803 if (err < 0)
1804 mgmt_pending_remove(cmd);
1805
1806 failed:
1807 hci_dev_unlock(hdev);
1808 return err;
1809 }
1810
set_bondable(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)1811 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1812 u16 len)
1813 {
1814 struct mgmt_mode *cp = data;
1815 bool changed;
1816 int err;
1817
1818 bt_dev_dbg(hdev, "sock %p", sk);
1819
1820 if (cp->val != 0x00 && cp->val != 0x01)
1821 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1822 MGMT_STATUS_INVALID_PARAMS);
1823
1824 hci_dev_lock(hdev);
1825
1826 if (cp->val)
1827 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1828 else
1829 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1830
1831 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1832 if (err < 0)
1833 goto unlock;
1834
1835 if (changed) {
1836 /* In limited privacy mode the change of bondable mode
1837 * may affect the local advertising address.
1838 */
1839 hci_update_discoverable(hdev);
1840
1841 err = new_settings(hdev, sk);
1842 }
1843
1844 unlock:
1845 hci_dev_unlock(hdev);
1846 return err;
1847 }
1848
1849 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1850 u16 len)
1851 {
1852 struct mgmt_mode *cp = data;
1853 struct mgmt_pending_cmd *cmd;
1854 u8 val, status;
1855 int err;
1856
1857 bt_dev_dbg(hdev, "sock %p", sk);
1858
1859 status = mgmt_bredr_support(hdev);
1860 if (status)
1861 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1862 status);
1863
1864 if (cp->val != 0x00 && cp->val != 0x01)
1865 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1866 MGMT_STATUS_INVALID_PARAMS);
1867
1868 hci_dev_lock(hdev);
1869
1870 if (!hdev_is_powered(hdev)) {
1871 bool changed = false;
1872
1873 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1874 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1875 changed = true;
1876 }
1877
1878 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1879 if (err < 0)
1880 goto failed;
1881
1882 if (changed)
1883 err = new_settings(hdev, sk);
1884
1885 goto failed;
1886 }
1887
1888 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1889 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1890 MGMT_STATUS_BUSY);
1891 goto failed;
1892 }
1893
1894 val = !!cp->val;
1895
1896 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1897 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1898 goto failed;
1899 }
1900
1901 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1902 if (!cmd) {
1903 err = -ENOMEM;
1904 goto failed;
1905 }
1906
1907 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1908 if (err < 0) {
1909 mgmt_pending_remove(cmd);
1910 goto failed;
1911 }
1912
1913 failed:
1914 hci_dev_unlock(hdev);
1915 return err;
1916 }
1917
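/* Completion of Set SSP: on failure the HCI_SSP_ENABLED flag is rolled
 * back (announcing the reverted settings if it had been set) and every
 * pending SET_SSP command gets the error status; on success the flag is
 * aligned with the requested value and the EIR data is refreshed.
 */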
1918 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1919 {
1920 struct cmd_lookup match = { NULL, hdev };
1921 struct mgmt_pending_cmd *cmd = data;
1922 struct mgmt_mode *cp = cmd->param;
1923 u8 enable = cp->val;
1924 bool changed;
1925
1926 /* Make sure cmd still outstanding. */
1927 if (err == -ECANCELED || cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1928 return;
1929
1930 if (err) {
1931 u8 mgmt_err = mgmt_status(err);
1932
1933 if (enable && hci_dev_test_and_clear_flag(hdev,
1934 HCI_SSP_ENABLED)) {
1935 new_settings(hdev, NULL);
1936 }
1937
1938 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, true,
1939 cmd_status_rsp, &mgmt_err);
1940 return;
1941 }
1942
1943 if (enable) {
1944 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1945 } else {
1946 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1947 }
1948
1949 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, true, settings_rsp, &match);
1950
1951 if (changed)
1952 new_settings(hdev, match.sk);
1953
1954 if (match.sk)
1955 sock_put(match.sk);
1956
1957 hci_update_eir_sync(hdev);
1958 }
1959
1960 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1961 {
1962 struct mgmt_pending_cmd *cmd = data;
1963 struct mgmt_mode *cp = cmd->param;
1964 bool changed = false;
1965 int err;
1966
1967 if (cp->val)
1968 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1969
1970 err = hci_write_ssp_mode_sync(hdev, cp->val);
1971
1972 if (!err && changed)
1973 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1974
1975 return err;
1976 }
1977
1978 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1979 {
1980 struct mgmt_mode *cp = data;
1981 struct mgmt_pending_cmd *cmd;
1982 u8 status;
1983 int err;
1984
1985 bt_dev_dbg(hdev, "sock %p", sk);
1986
1987 status = mgmt_bredr_support(hdev);
1988 if (status)
1989 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1990
1991 if (!lmp_ssp_capable(hdev))
1992 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1993 MGMT_STATUS_NOT_SUPPORTED);
1994
1995 if (cp->val != 0x00 && cp->val != 0x01)
1996 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1997 MGMT_STATUS_INVALID_PARAMS);
1998
1999 hci_dev_lock(hdev);
2000
2001 if (!hdev_is_powered(hdev)) {
2002 bool changed;
2003
2004 if (cp->val) {
2005 changed = !hci_dev_test_and_set_flag(hdev,
2006 HCI_SSP_ENABLED);
2007 } else {
2008 changed = hci_dev_test_and_clear_flag(hdev,
2009 HCI_SSP_ENABLED);
2010 }
2011
2012 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2013 if (err < 0)
2014 goto failed;
2015
2016 if (changed)
2017 err = new_settings(hdev, sk);
2018
2019 goto failed;
2020 }
2021
2022 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2023 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2024 MGMT_STATUS_BUSY);
2025 goto failed;
2026 }
2027
2028 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2029 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2030 goto failed;
2031 }
2032
2033 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2034 if (!cmd)
2035 err = -ENOMEM;
2036 else
2037 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2038 set_ssp_complete);
2039
2040 if (err < 0) {
2041 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2042 MGMT_STATUS_FAILED);
2043
2044 if (cmd)
2045 mgmt_pending_remove(cmd);
2046 }
2047
2048 failed:
2049 hci_dev_unlock(hdev);
2050 return err;
2051 }
2052
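/* Bluetooth High Speed (AMP) support is no longer implemented in the
 * kernel, so MGMT_OP_SET_HS is rejected unconditionally and only remains
 * as an opcode for API compatibility.
 */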
2053 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2054 {
2055 bt_dev_dbg(hdev, "sock %p", sk);
2056
2057 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2058 MGMT_STATUS_NOT_SUPPORTED);
2059 }
2060
2061 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2062 {
2063 struct cmd_lookup match = { NULL, hdev };
2064 u8 status = mgmt_status(err);
2065
2066 bt_dev_dbg(hdev, "err %d", err);
2067
2068 if (status) {
2069 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, true, cmd_status_rsp,
2070 &status);
2071 return;
2072 }
2073
2074 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, true, settings_rsp, &match);
2075
2076 new_settings(hdev, match.sk);
2077
2078 if (match.sk)
2079 sock_put(match.sk);
2080 }
2081
2082 static int set_le_sync(struct hci_dev *hdev, void *data)
2083 {
2084 struct mgmt_pending_cmd *cmd = data;
2085 struct mgmt_mode *cp = cmd->param;
2086 u8 val = !!cp->val;
2087 int err;
2088
2089 if (!val) {
2090 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2091
2092 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2093 hci_disable_advertising_sync(hdev);
2094
2095 if (ext_adv_capable(hdev))
2096 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2097 } else {
2098 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2099 }
2100
2101 err = hci_write_le_host_supported_sync(hdev, val, 0);
2102
2103 /* Make sure the controller has a good default for
2104 * advertising data. Restrict the update to when LE
2105 * has actually been enabled. During power on, the
2106 * update in powered_update_hci will take care of it.
2107 */
2108 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2109 if (ext_adv_capable(hdev)) {
2110 int status;
2111
2112 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2113 if (!status)
2114 hci_update_scan_rsp_data_sync(hdev, 0x00);
2115 } else {
2116 hci_update_adv_data_sync(hdev, 0x00);
2117 hci_update_scan_rsp_data_sync(hdev, 0x00);
2118 }
2119
2120 hci_update_passive_scan(hdev);
2121 }
2122
2123 return err;
2124 }
2125
2126 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2127 {
2128 struct mgmt_pending_cmd *cmd = data;
2129 u8 status = mgmt_status(err);
2130 struct sock *sk = cmd->sk;
2131
2132 if (status) {
2133 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, true,
2134 cmd_status_rsp, &status);
2135 return;
2136 }
2137
2138 mgmt_pending_remove(cmd);
2139 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2140 }
2141
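/* Apply the Mesh Receiver configuration: toggle the HCI_MESH flag, take
 * over the LE scan interval/window and copy the AD type filter list. A
 * filter list that is empty, or too large to fit and therefore skipped,
 * means all advertising packets get forwarded.
 */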
2142 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2143 {
2144 struct mgmt_pending_cmd *cmd = data;
2145 struct mgmt_cp_set_mesh *cp = cmd->param;
2146 size_t len = cmd->param_len;
2147
2148 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2149
2150 if (cp->enable)
2151 hci_dev_set_flag(hdev, HCI_MESH);
2152 else
2153 hci_dev_clear_flag(hdev, HCI_MESH);
2154
2155 hdev->le_scan_interval = __le16_to_cpu(cp->period);
2156 hdev->le_scan_window = __le16_to_cpu(cp->window);
2157
2158 len -= sizeof(*cp);
2159
2160 /* If filters don't fit, forward all adv pkts */
2161 if (len <= sizeof(hdev->mesh_ad_types))
2162 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2163
2164 hci_update_passive_scan_sync(hdev);
2165 return 0;
2166 }
2167
2168 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2169 {
2170 struct mgmt_cp_set_mesh *cp = data;
2171 struct mgmt_pending_cmd *cmd;
2172 __u16 period, window;
2173 int err = 0;
2174
2175 bt_dev_dbg(hdev, "sock %p", sk);
2176
2177 if (!lmp_le_capable(hdev) ||
2178 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2179 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2180 MGMT_STATUS_NOT_SUPPORTED);
2181
2182 if (cp->enable != 0x00 && cp->enable != 0x01)
2183 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2184 MGMT_STATUS_INVALID_PARAMS);
2185
2186 /* Keep allowed ranges in sync with set_scan_params() */
2187 period = __le16_to_cpu(cp->period);
2188
2189 if (period < 0x0004 || period > 0x4000)
2190 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2191 MGMT_STATUS_INVALID_PARAMS);
2192
2193 window = __le16_to_cpu(cp->window);
2194
2195 if (window < 0x0004 || window > 0x4000)
2196 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2197 MGMT_STATUS_INVALID_PARAMS);
2198
2199 if (window > period)
2200 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2201 MGMT_STATUS_INVALID_PARAMS);
2202
2203 hci_dev_lock(hdev);
2204
2205 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2206 if (!cmd)
2207 err = -ENOMEM;
2208 else
2209 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2210 set_mesh_complete);
2211
2212 if (err < 0) {
2213 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2214 MGMT_STATUS_FAILED);
2215
2216 if (cmd)
2217 mgmt_pending_remove(cmd);
2218 }
2219
2220 hci_dev_unlock(hdev);
2221 return err;
2222 }
2223
2224 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2225 {
2226 struct mgmt_mesh_tx *mesh_tx = data;
2227 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2228 unsigned long mesh_send_interval;
2229 u8 mgmt_err = mgmt_status(err);
2230
2231 /* Report any errors here, but don't report completion */
2232
2233 if (mgmt_err) {
2234 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2235 /* Send Complete Error Code for handle */
2236 mesh_send_complete(hdev, mesh_tx, false);
2237 return;
2238 }
2239
2240 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2241 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2242 mesh_send_interval);
2243 }
2244
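/* Transmit a mesh packet by installing a temporary advertising instance
 * one past the controller's last regular advertising set. The instance
 * is sized to last roughly send->cnt advertising events and is scheduled
 * immediately unless another instance is currently on air.
 */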
2245 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2246 {
2247 struct mgmt_mesh_tx *mesh_tx = data;
2248 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2249 struct adv_info *adv, *next_instance;
2250 u8 instance = hdev->le_num_of_adv_sets + 1;
2251 u16 timeout, duration;
2252 int err = 0;
2253
2254 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2255 return MGMT_STATUS_BUSY;
2256
2257 timeout = 1000;
2258 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2259 adv = hci_add_adv_instance(hdev, instance, 0,
2260 send->adv_data_len, send->adv_data,
2261 0, NULL,
2262 timeout, duration,
2263 HCI_ADV_TX_POWER_NO_PREFERENCE,
2264 hdev->le_adv_min_interval,
2265 hdev->le_adv_max_interval,
2266 mesh_tx->handle);
2267
2268 if (!IS_ERR(adv))
2269 mesh_tx->instance = instance;
2270 else
2271 err = PTR_ERR(adv);
2272
2273 if (hdev->cur_adv_instance == instance) {
2274 /* If the currently advertised instance is being changed then
2275 * cancel the current advertising and schedule the next
2276 * instance. If there is only one instance then the overridden
2277 * advertising data will be visible right away.
2278 */
2279 cancel_adv_timeout(hdev);
2280
2281 next_instance = hci_get_next_instance(hdev, instance);
2282 if (next_instance)
2283 instance = next_instance->instance;
2284 else
2285 instance = 0;
2286 } else if (hdev->adv_instance_timeout) {
2287 /* Immediately advertise the new instance if no other instance is
2288 * active, or let it come up naturally from the queue if advertising
2289 * is already in progress. */
2290 instance = 0;
2291 }
2292
2293 if (instance)
2294 return hci_schedule_adv_instance_sync(hdev, instance, true);
2295
2296 return err;
2297 }
2298
2299 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2300 {
2301 struct mgmt_rp_mesh_read_features *rp = data;
2302
2303 if (rp->used_handles >= rp->max_handles)
2304 return;
2305
2306 rp->handles[rp->used_handles++] = mesh_tx->handle;
2307 }
2308
2309 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2310 void *data, u16 len)
2311 {
2312 struct mgmt_rp_mesh_read_features rp;
2313
2314 if (!lmp_le_capable(hdev) ||
2315 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2316 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2317 MGMT_STATUS_NOT_SUPPORTED);
2318
2319 memset(&rp, 0, sizeof(rp));
2320 rp.index = cpu_to_le16(hdev->id);
2321 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2322 rp.max_handles = MESH_HANDLES_MAX;
2323
2324 hci_dev_lock(hdev);
2325
2326 if (rp.max_handles)
2327 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2328
2329 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2330 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2331
2332 hci_dev_unlock(hdev);
2333 return 0;
2334 }
2335
2336 static int send_cancel(struct hci_dev *hdev, void *data)
2337 {
2338 struct mgmt_pending_cmd *cmd = data;
2339 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2340 struct mgmt_mesh_tx *mesh_tx;
2341
2342 if (!cancel->handle) {
2343 do {
2344 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2345
2346 if (mesh_tx)
2347 mesh_send_complete(hdev, mesh_tx, false);
2348 } while (mesh_tx);
2349 } else {
2350 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2351
2352 if (mesh_tx && mesh_tx->sk == cmd->sk)
2353 mesh_send_complete(hdev, mesh_tx, false);
2354 }
2355
2356 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2357 0, NULL, 0);
2358 mgmt_pending_free(cmd);
2359
2360 return 0;
2361 }
2362
2363 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2364 void *data, u16 len)
2365 {
2366 struct mgmt_pending_cmd *cmd;
2367 int err;
2368
2369 if (!lmp_le_capable(hdev) ||
2370 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2371 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2372 MGMT_STATUS_NOT_SUPPORTED);
2373
2374 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2375 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2376 MGMT_STATUS_REJECTED);
2377
2378 hci_dev_lock(hdev);
2379 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2380 if (!cmd)
2381 err = -ENOMEM;
2382 else
2383 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2384
2385 if (err < 0) {
2386 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2387 MGMT_STATUS_FAILED);
2388
2389 if (cmd)
2390 mgmt_pending_free(cmd);
2391 }
2392
2393 hci_dev_unlock(hdev);
2394 return err;
2395 }
2396
2397 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2398 {
2399 struct mgmt_mesh_tx *mesh_tx;
2400 struct mgmt_cp_mesh_send *send = data;
2401 struct mgmt_rp_mesh_read_features rp;
2402 bool sending;
2403 int err = 0;
2404
2405 if (!lmp_le_capable(hdev) ||
2406 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2407 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2408 MGMT_STATUS_NOT_SUPPORTED);
2409 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2410 len <= MGMT_MESH_SEND_SIZE ||
2411 len > (MGMT_MESH_SEND_SIZE + 31))
2412 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2413 MGMT_STATUS_REJECTED);
2414
2415 hci_dev_lock(hdev);
2416
2417 memset(&rp, 0, sizeof(rp));
2418 rp.max_handles = MESH_HANDLES_MAX;
2419
2420 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2421
2422 if (rp.max_handles <= rp.used_handles) {
2423 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2424 MGMT_STATUS_BUSY);
2425 goto done;
2426 }
2427
2428 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2429 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2430
2431 if (!mesh_tx)
2432 err = -ENOMEM;
2433 else if (!sending)
2434 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2435 mesh_send_start_complete);
2436
2437 if (err < 0) {
2438 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2439 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2440 MGMT_STATUS_FAILED);
2441
2442 if (mesh_tx) {
2443 if (sending)
2444 mgmt_mesh_remove(mesh_tx);
2445 }
2446 } else {
2447 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2448
2449 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2450 &mesh_tx->handle, 1);
2451 }
2452
2453 done:
2454 hci_dev_unlock(hdev);
2455 return err;
2456 }
2457
2458 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2459 {
2460 struct mgmt_mode *cp = data;
2461 struct mgmt_pending_cmd *cmd;
2462 int err;
2463 u8 val, enabled;
2464
2465 bt_dev_dbg(hdev, "sock %p", sk);
2466
2467 if (!lmp_le_capable(hdev))
2468 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2469 MGMT_STATUS_NOT_SUPPORTED);
2470
2471 if (cp->val != 0x00 && cp->val != 0x01)
2472 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2473 MGMT_STATUS_INVALID_PARAMS);
2474
2475 /* Bluetooth single-mode LE-only controllers, or dual-mode
2476 * controllers configured as LE-only devices, do not allow
2477 * switching LE off. These have either LE enabled explicitly
2478 * or BR/EDR previously switched off.
2479 *
2480 * When trying to enable LE while it is already enabled,
2481 * gracefully send a positive response. Trying to disable it,
2482 * however, results in rejection.
2483 */
2484 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2485 if (cp->val == 0x01)
2486 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2487
2488 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2489 MGMT_STATUS_REJECTED);
2490 }
2491
2492 hci_dev_lock(hdev);
2493
2494 val = !!cp->val;
2495 enabled = lmp_host_le_capable(hdev);
2496
2497 if (!hdev_is_powered(hdev) || val == enabled) {
2498 bool changed = false;
2499
2500 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2501 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2502 changed = true;
2503 }
2504
2505 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2506 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2507 changed = true;
2508 }
2509
2510 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2511 if (err < 0)
2512 goto unlock;
2513
2514 if (changed)
2515 err = new_settings(hdev, sk);
2516
2517 goto unlock;
2518 }
2519
2520 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2521 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2522 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2523 MGMT_STATUS_BUSY);
2524 goto unlock;
2525 }
2526
2527 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2528 if (!cmd)
2529 err = -ENOMEM;
2530 else
2531 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2532 set_le_complete);
2533
2534 if (err < 0) {
2535 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2536 MGMT_STATUS_FAILED);
2537
2538 if (cmd)
2539 mgmt_pending_remove(cmd);
2540 }
2541
2542 unlock:
2543 hci_dev_unlock(hdev);
2544 return err;
2545 }
2546
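/* Worker for MGMT_OP_HCI_CMD_SYNC: send the caller-supplied raw HCI
 * command, optionally waiting for a specific event and honouring the
 * caller's timeout (in seconds) instead of the default HCI_CMD_TIMEOUT,
 * then return the resulting event payload to userspace.
 */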
2547 static int send_hci_cmd_sync(struct hci_dev *hdev, void *data)
2548 {
2549 struct mgmt_pending_cmd *cmd = data;
2550 struct mgmt_cp_hci_cmd_sync *cp = cmd->param;
2551 struct sk_buff *skb;
2552
2553 skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cp->opcode),
2554 le16_to_cpu(cp->params_len), cp->params,
2555 cp->event, cp->timeout ?
2556 secs_to_jiffies(cp->timeout) :
2557 HCI_CMD_TIMEOUT);
2558 if (IS_ERR(skb)) {
2559 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2560 mgmt_status(PTR_ERR(skb)));
2561 goto done;
2562 }
2563
2564 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC, 0,
2565 skb->data, skb->len);
2566
2567 kfree_skb(skb);
2568
2569 done:
2570 mgmt_pending_free(cmd);
2571
2572 return 0;
2573 }
2574
2575 static int mgmt_hci_cmd_sync(struct sock *sk, struct hci_dev *hdev,
2576 void *data, u16 len)
2577 {
2578 struct mgmt_cp_hci_cmd_sync *cp = data;
2579 struct mgmt_pending_cmd *cmd;
2580 int err;
2581
2582 if (len != (offsetof(struct mgmt_cp_hci_cmd_sync, params) +
2583 le16_to_cpu(cp->params_len)))
2584 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2585 MGMT_STATUS_INVALID_PARAMS);
2586
2587 hci_dev_lock(hdev);
2588 cmd = mgmt_pending_new(sk, MGMT_OP_HCI_CMD_SYNC, hdev, data, len);
2589 if (!cmd)
2590 err = -ENOMEM;
2591 else
2592 err = hci_cmd_sync_queue(hdev, send_hci_cmd_sync, cmd, NULL);
2593
2594 if (err < 0) {
2595 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2596 MGMT_STATUS_FAILED);
2597
2598 if (cmd)
2599 mgmt_pending_free(cmd);
2600 }
2601
2602 hci_dev_unlock(hdev);
2603 return err;
2604 }
2605
2606 /* This is a helper function to test for pending mgmt commands that can
2607 * trigger CoD or EIR HCI commands. Only one such pending mgmt command is
2608 * allowed at a time since otherwise we cannot easily track what the
2609 * current values are and will be, and based on that calculate whether a
2610 * new HCI command needs to be sent and, if so, with what value.
2611 */
2612 static bool pending_eir_or_class(struct hci_dev *hdev)
2613 {
2614 struct mgmt_pending_cmd *cmd;
2615
2616 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2617 switch (cmd->opcode) {
2618 case MGMT_OP_ADD_UUID:
2619 case MGMT_OP_REMOVE_UUID:
2620 case MGMT_OP_SET_DEV_CLASS:
2621 case MGMT_OP_SET_POWERED:
2622 return true;
2623 }
2624 }
2625
2626 return false;
2627 }
2628
2629 static const u8 bluetooth_base_uuid[] = {
2630 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2631 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2632 };
2633
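/* Determine the shortest representation of a 128-bit UUID: UUIDs are
 * stored little-endian, so a UUID not matching the Bluetooth Base UUID
 * in its first 12 bytes needs the full 128 bits; otherwise the 32-bit
 * value at offset 12 decides between a 16-bit and a 32-bit UUID.
 */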
2634 static u8 get_uuid_size(const u8 *uuid)
2635 {
2636 u32 val;
2637
2638 if (memcmp(uuid, bluetooth_base_uuid, 12))
2639 return 128;
2640
2641 val = get_unaligned_le32(&uuid[12]);
2642 if (val > 0xffff)
2643 return 32;
2644
2645 return 16;
2646 }
2647
2648 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2649 {
2650 struct mgmt_pending_cmd *cmd = data;
2651
2652 bt_dev_dbg(hdev, "err %d", err);
2653
2654 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
2655 mgmt_status(err), hdev->dev_class, 3);
2656
2657 mgmt_pending_free(cmd);
2658 }
2659
2660 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2661 {
2662 int err;
2663
2664 err = hci_update_class_sync(hdev);
2665 if (err)
2666 return err;
2667
2668 return hci_update_eir_sync(hdev);
2669 }
2670
2671 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2672 {
2673 struct mgmt_cp_add_uuid *cp = data;
2674 struct mgmt_pending_cmd *cmd;
2675 struct bt_uuid *uuid;
2676 int err;
2677
2678 bt_dev_dbg(hdev, "sock %p", sk);
2679
2680 hci_dev_lock(hdev);
2681
2682 if (pending_eir_or_class(hdev)) {
2683 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2684 MGMT_STATUS_BUSY);
2685 goto failed;
2686 }
2687
2688 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2689 if (!uuid) {
2690 err = -ENOMEM;
2691 goto failed;
2692 }
2693
2694 memcpy(uuid->uuid, cp->uuid, 16);
2695 uuid->svc_hint = cp->svc_hint;
2696 uuid->size = get_uuid_size(cp->uuid);
2697
2698 list_add_tail(&uuid->list, &hdev->uuids);
2699
2700 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2701 if (!cmd) {
2702 err = -ENOMEM;
2703 goto failed;
2704 }
2705
2706 /* MGMT_OP_ADD_UUID doesn't require the adapter to be up/running, so use
2707 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2708 */
2709 err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
2710 mgmt_class_complete);
2711 if (err < 0) {
2712 mgmt_pending_free(cmd);
2713 goto failed;
2714 }
2715
2716 failed:
2717 hci_dev_unlock(hdev);
2718 return err;
2719 }
2720
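/* Arm the service cache: with HCI_SERVICE_CACHE set, remove_uuid()
 * completes immediately and the class/EIR update is deferred to the
 * delayed service_cache work (CACHE_TIMEOUT), presumably to avoid a
 * burst of HCI commands while UUIDs are removed and re-added in a row.
 */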
2721 static bool enable_service_cache(struct hci_dev *hdev)
2722 {
2723 if (!hdev_is_powered(hdev))
2724 return false;
2725
2726 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2727 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2728 CACHE_TIMEOUT);
2729 return true;
2730 }
2731
2732 return false;
2733 }
2734
2735 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2736 {
2737 int err;
2738
2739 err = hci_update_class_sync(hdev);
2740 if (err)
2741 return err;
2742
2743 return hci_update_eir_sync(hdev);
2744 }
2745
2746 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2747 u16 len)
2748 {
2749 struct mgmt_cp_remove_uuid *cp = data;
2750 struct mgmt_pending_cmd *cmd;
2751 struct bt_uuid *match, *tmp;
2752 static const u8 bt_uuid_any[] = {
2753 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2754 };
2755 int err, found;
2756
2757 bt_dev_dbg(hdev, "sock %p", sk);
2758
2759 hci_dev_lock(hdev);
2760
2761 if (pending_eir_or_class(hdev)) {
2762 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2763 MGMT_STATUS_BUSY);
2764 goto unlock;
2765 }
2766
2767 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2768 hci_uuids_clear(hdev);
2769
2770 if (enable_service_cache(hdev)) {
2771 err = mgmt_cmd_complete(sk, hdev->id,
2772 MGMT_OP_REMOVE_UUID,
2773 0, hdev->dev_class, 3);
2774 goto unlock;
2775 }
2776
2777 goto update_class;
2778 }
2779
2780 found = 0;
2781
2782 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2783 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2784 continue;
2785
2786 list_del(&match->list);
2787 kfree(match);
2788 found++;
2789 }
2790
2791 if (found == 0) {
2792 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2793 MGMT_STATUS_INVALID_PARAMS);
2794 goto unlock;
2795 }
2796
2797 update_class:
2798 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2799 if (!cmd) {
2800 err = -ENOMEM;
2801 goto unlock;
2802 }
2803
2804 /* MGMT_OP_REMOVE_UUID doesn't require the adapter to be up/running, so use
2805 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2806 */
2807 err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
2808 mgmt_class_complete);
2809 if (err < 0)
2810 mgmt_pending_free(cmd);
2811
2812 unlock:
2813 hci_dev_unlock(hdev);
2814 return err;
2815 }
2816
2817 static int set_class_sync(struct hci_dev *hdev, void *data)
2818 {
2819 int err = 0;
2820
2821 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2822 cancel_delayed_work_sync(&hdev->service_cache);
2823 err = hci_update_eir_sync(hdev);
2824 }
2825
2826 if (err)
2827 return err;
2828
2829 return hci_update_class_sync(hdev);
2830 }
2831
2832 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2833 u16 len)
2834 {
2835 struct mgmt_cp_set_dev_class *cp = data;
2836 struct mgmt_pending_cmd *cmd;
2837 int err;
2838
2839 bt_dev_dbg(hdev, "sock %p", sk);
2840
2841 if (!lmp_bredr_capable(hdev))
2842 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2843 MGMT_STATUS_NOT_SUPPORTED);
2844
2845 hci_dev_lock(hdev);
2846
2847 if (pending_eir_or_class(hdev)) {
2848 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2849 MGMT_STATUS_BUSY);
2850 goto unlock;
2851 }
2852
2853 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2854 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2855 MGMT_STATUS_INVALID_PARAMS);
2856 goto unlock;
2857 }
2858
2859 hdev->major_class = cp->major;
2860 hdev->minor_class = cp->minor;
2861
2862 if (!hdev_is_powered(hdev)) {
2863 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2864 hdev->dev_class, 3);
2865 goto unlock;
2866 }
2867
2868 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2869 if (!cmd) {
2870 err = -ENOMEM;
2871 goto unlock;
2872 }
2873
2874 /* MGMT_OP_SET_DEV_CLASS doesn't require the adapter to be up/running, so use
2875 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2876 */
2877 err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
2878 mgmt_class_complete);
2879 if (err < 0)
2880 mgmt_pending_free(cmd);
2881
2882 unlock:
2883 hci_dev_unlock(hdev);
2884 return err;
2885 }
2886
2887 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2888 u16 len)
2889 {
2890 struct mgmt_cp_load_link_keys *cp = data;
2891 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2892 sizeof(struct mgmt_link_key_info));
2893 u16 key_count, expected_len;
2894 bool changed;
2895 int i;
2896
2897 bt_dev_dbg(hdev, "sock %p", sk);
2898
2899 if (!lmp_bredr_capable(hdev))
2900 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2901 MGMT_STATUS_NOT_SUPPORTED);
2902
2903 key_count = __le16_to_cpu(cp->key_count);
2904 if (key_count > max_key_count) {
2905 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2906 key_count);
2907 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2908 MGMT_STATUS_INVALID_PARAMS);
2909 }
2910
2911 expected_len = struct_size(cp, keys, key_count);
2912 if (expected_len != len) {
2913 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2914 expected_len, len);
2915 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2916 MGMT_STATUS_INVALID_PARAMS);
2917 }
2918
2919 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2920 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2921 MGMT_STATUS_INVALID_PARAMS);
2922
2923 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2924 key_count);
2925
2926 hci_dev_lock(hdev);
2927
2928 hci_link_keys_clear(hdev);
2929
2930 if (cp->debug_keys)
2931 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2932 else
2933 changed = hci_dev_test_and_clear_flag(hdev,
2934 HCI_KEEP_DEBUG_KEYS);
2935
2936 if (changed)
2937 new_settings(hdev, NULL);
2938
2939 for (i = 0; i < key_count; i++) {
2940 struct mgmt_link_key_info *key = &cp->keys[i];
2941
2942 if (hci_is_blocked_key(hdev,
2943 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2944 key->val)) {
2945 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2946 &key->addr.bdaddr);
2947 continue;
2948 }
2949
2950 if (key->addr.type != BDADDR_BREDR) {
2951 bt_dev_warn(hdev,
2952 "Invalid link address type %u for %pMR",
2953 key->addr.type, &key->addr.bdaddr);
2954 continue;
2955 }
2956
2957 if (key->type > 0x08) {
2958 bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
2959 key->type, &key->addr.bdaddr);
2960 continue;
2961 }
2962
2963 /* Always ignore debug keys and require a new pairing if
2964 * the user wants to use them.
2965 */
2966 if (key->type == HCI_LK_DEBUG_COMBINATION)
2967 continue;
2968
2969 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2970 key->type, key->pin_len, NULL);
2971 }
2972
2973 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2974
2975 hci_dev_unlock(hdev);
2976
2977 return 0;
2978 }
2979
2980 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2981 u8 addr_type, struct sock *skip_sk)
2982 {
2983 struct mgmt_ev_device_unpaired ev;
2984
2985 bacpy(&ev.addr.bdaddr, bdaddr);
2986 ev.addr.type = addr_type;
2987
2988 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2989 skip_sk);
2990 }
2991
2992 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2993 {
2994 struct mgmt_pending_cmd *cmd = data;
2995 struct mgmt_cp_unpair_device *cp = cmd->param;
2996
2997 if (!err)
2998 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2999
3000 cmd->cmd_complete(cmd, err);
3001 mgmt_pending_free(cmd);
3002 }
3003
3004 static int unpair_device_sync(struct hci_dev *hdev, void *data)
3005 {
3006 struct mgmt_pending_cmd *cmd = data;
3007 struct mgmt_cp_unpair_device *cp = cmd->param;
3008 struct hci_conn *conn;
3009
3010 if (cp->addr.type == BDADDR_BREDR)
3011 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3012 &cp->addr.bdaddr);
3013 else
3014 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3015 le_addr_type(cp->addr.type));
3016
3017 if (!conn)
3018 return 0;
3019
3020 /* Disregard any possible error since hci_abort_conn_sync() and
3021 * friends will clean up the connection regardless of the error.
3022 */
3023 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3024
3025 return 0;
3026 }
3027
3028 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3029 u16 len)
3030 {
3031 struct mgmt_cp_unpair_device *cp = data;
3032 struct mgmt_rp_unpair_device rp;
3033 struct hci_conn_params *params;
3034 struct mgmt_pending_cmd *cmd;
3035 struct hci_conn *conn;
3036 u8 addr_type;
3037 int err;
3038
3039 memset(&rp, 0, sizeof(rp));
3040 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3041 rp.addr.type = cp->addr.type;
3042
3043 if (!bdaddr_type_is_valid(cp->addr.type))
3044 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3045 MGMT_STATUS_INVALID_PARAMS,
3046 &rp, sizeof(rp));
3047
3048 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
3049 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3050 MGMT_STATUS_INVALID_PARAMS,
3051 &rp, sizeof(rp));
3052
3053 hci_dev_lock(hdev);
3054
3055 if (!hdev_is_powered(hdev)) {
3056 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3057 MGMT_STATUS_NOT_POWERED, &rp,
3058 sizeof(rp));
3059 goto unlock;
3060 }
3061
3062 if (cp->addr.type == BDADDR_BREDR) {
3063 /* If disconnection is requested, then look up the
3064 * connection. If the remote device is connected, it
3065 * will be later used to terminate the link.
3066 *
3067 * Setting it to NULL explicitly will cause no
3068 * termination of the link.
3069 */
3070 if (cp->disconnect)
3071 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3072 &cp->addr.bdaddr);
3073 else
3074 conn = NULL;
3075
3076 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3077 if (err < 0) {
3078 err = mgmt_cmd_complete(sk, hdev->id,
3079 MGMT_OP_UNPAIR_DEVICE,
3080 MGMT_STATUS_NOT_PAIRED, &rp,
3081 sizeof(rp));
3082 goto unlock;
3083 }
3084
3085 goto done;
3086 }
3087
3088 /* LE address type */
3089 addr_type = le_addr_type(cp->addr.type);
3090
3091 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3092 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3093 if (err < 0) {
3094 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3095 MGMT_STATUS_NOT_PAIRED, &rp,
3096 sizeof(rp));
3097 goto unlock;
3098 }
3099
3100 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3101 if (!conn) {
3102 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3103 goto done;
3104 }
3105
3107 /* Defer clearing the connection parameters until the connection
3108 * closes, to give a chance of keeping them if re-pairing happens.
3109 */
3110 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3111
3112 /* Disable auto-connection parameters if present */
3113 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3114 if (params) {
3115 if (params->explicit_connect)
3116 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3117 else
3118 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3119 }
3120
3121 /* If disconnection is not requested, then clear the connection
3122 * variable so that the link is not terminated.
3123 */
3124 if (!cp->disconnect)
3125 conn = NULL;
3126
3127 done:
3128 /* If the connection variable is set, then termination of the
3129 * link is requested.
3130 */
3131 if (!conn) {
3132 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3133 &rp, sizeof(rp));
3134 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3135 goto unlock;
3136 }
3137
3138 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3139 sizeof(*cp));
3140 if (!cmd) {
3141 err = -ENOMEM;
3142 goto unlock;
3143 }
3144
3145 cmd->cmd_complete = addr_cmd_complete;
3146
3147 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3148 unpair_device_complete);
3149 if (err < 0)
3150 mgmt_pending_free(cmd);
3151
3152 unlock:
3153 hci_dev_unlock(hdev);
3154 return err;
3155 }
3156
3157 static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
3158 {
3159 struct mgmt_pending_cmd *cmd = data;
3160
3161 cmd->cmd_complete(cmd, mgmt_status(err));
3162 mgmt_pending_free(cmd);
3163 }
3164
3165 static int disconnect_sync(struct hci_dev *hdev, void *data)
3166 {
3167 struct mgmt_pending_cmd *cmd = data;
3168 struct mgmt_cp_disconnect *cp = cmd->param;
3169 struct hci_conn *conn;
3170
3171 if (cp->addr.type == BDADDR_BREDR)
3172 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3173 &cp->addr.bdaddr);
3174 else
3175 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3176 le_addr_type(cp->addr.type));
3177
3178 if (!conn)
3179 return -ENOTCONN;
3180
3181 /* Disregard any possible error since hci_abort_conn_sync() and
3182 * friends will clean up the connection regardless of the error.
3183 */
3184 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3185
3186 return 0;
3187 }
3188
3189 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3190 u16 len)
3191 {
3192 struct mgmt_cp_disconnect *cp = data;
3193 struct mgmt_rp_disconnect rp;
3194 struct mgmt_pending_cmd *cmd;
3195 int err;
3196
3197 bt_dev_dbg(hdev, "sock %p", sk);
3198
3199 memset(&rp, 0, sizeof(rp));
3200 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3201 rp.addr.type = cp->addr.type;
3202
3203 if (!bdaddr_type_is_valid(cp->addr.type))
3204 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3205 MGMT_STATUS_INVALID_PARAMS,
3206 &rp, sizeof(rp));
3207
3208 hci_dev_lock(hdev);
3209
3210 if (!test_bit(HCI_UP, &hdev->flags)) {
3211 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3212 MGMT_STATUS_NOT_POWERED, &rp,
3213 sizeof(rp));
3214 goto failed;
3215 }
3216
3217 cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3218 if (!cmd) {
3219 err = -ENOMEM;
3220 goto failed;
3221 }
3222
3223 cmd->cmd_complete = generic_cmd_complete;
3224
3225 err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
3226 disconnect_complete);
3227 if (err < 0)
3228 mgmt_pending_free(cmd);
3229
3230 failed:
3231 hci_dev_unlock(hdev);
3232 return err;
3233 }
3234
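/* Map an HCI link type and address type to the mgmt API bdaddr type:
 * all LE-based links (including ISO CIS/BIS/PA links) resolve to an LE
 * public or random address, everything else falls back to BR/EDR.
 */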
3235 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3236 {
3237 switch (link_type) {
3238 case CIS_LINK:
3239 case BIS_LINK:
3240 case PA_LINK:
3241 case LE_LINK:
3242 switch (addr_type) {
3243 case ADDR_LE_DEV_PUBLIC:
3244 return BDADDR_LE_PUBLIC;
3245
3246 default:
3247 /* Fallback to LE Random address type */
3248 return BDADDR_LE_RANDOM;
3249 }
3250
3251 default:
3252 /* Fallback to BR/EDR type */
3253 return BDADDR_BREDR;
3254 }
3255 }
3256
3257 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3258 u16 data_len)
3259 {
3260 struct mgmt_rp_get_connections *rp;
3261 struct hci_conn *c;
3262 int err;
3263 u16 i;
3264
3265 bt_dev_dbg(hdev, "sock %p", sk);
3266
3267 hci_dev_lock(hdev);
3268
3269 if (!hdev_is_powered(hdev)) {
3270 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3271 MGMT_STATUS_NOT_POWERED);
3272 goto unlock;
3273 }
3274
3275 i = 0;
3276 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3277 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3278 i++;
3279 }
3280
3281 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3282 if (!rp) {
3283 err = -ENOMEM;
3284 goto unlock;
3285 }
3286
3287 i = 0;
3288 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3289 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3290 continue;
3291 bacpy(&rp->addr[i].bdaddr, &c->dst);
3292 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3293 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3294 continue;
3295 i++;
3296 }
3297
3298 rp->conn_count = cpu_to_le16(i);
3299
3300 /* Recalculate length in case of filtered SCO connections, etc */
3301 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3302 struct_size(rp, addr, i));
3303
3304 kfree(rp);
3305
3306 unlock:
3307 hci_dev_unlock(hdev);
3308 return err;
3309 }
3310
3311 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3312 struct mgmt_cp_pin_code_neg_reply *cp)
3313 {
3314 struct mgmt_pending_cmd *cmd;
3315 int err;
3316
3317 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3318 sizeof(*cp));
3319 if (!cmd)
3320 return -ENOMEM;
3321
3322 cmd->cmd_complete = addr_cmd_complete;
3323
3324 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3325 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3326 if (err < 0)
3327 mgmt_pending_remove(cmd);
3328
3329 return err;
3330 }
3331
3332 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3333 u16 len)
3334 {
3335 struct hci_conn *conn;
3336 struct mgmt_cp_pin_code_reply *cp = data;
3337 struct hci_cp_pin_code_reply reply;
3338 struct mgmt_pending_cmd *cmd;
3339 int err;
3340
3341 bt_dev_dbg(hdev, "sock %p", sk);
3342
3343 hci_dev_lock(hdev);
3344
3345 if (!hdev_is_powered(hdev)) {
3346 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3347 MGMT_STATUS_NOT_POWERED);
3348 goto failed;
3349 }
3350
3351 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3352 if (!conn) {
3353 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3354 MGMT_STATUS_NOT_CONNECTED);
3355 goto failed;
3356 }
3357
3358 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3359 struct mgmt_cp_pin_code_neg_reply ncp;
3360
3361 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3362
3363 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3364
3365 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3366 if (err >= 0)
3367 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3368 MGMT_STATUS_INVALID_PARAMS);
3369
3370 goto failed;
3371 }
3372
3373 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3374 if (!cmd) {
3375 err = -ENOMEM;
3376 goto failed;
3377 }
3378
3379 cmd->cmd_complete = addr_cmd_complete;
3380
3381 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3382 reply.pin_len = cp->pin_len;
3383 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3384
3385 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3386 if (err < 0)
3387 mgmt_pending_remove(cmd);
3388
3389 failed:
3390 hci_dev_unlock(hdev);
3391 return err;
3392 }
3393
3394 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3395 u16 len)
3396 {
3397 struct mgmt_cp_set_io_capability *cp = data;
3398
3399 bt_dev_dbg(hdev, "sock %p", sk);
3400
3401 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3402 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3403 MGMT_STATUS_INVALID_PARAMS);
3404
3405 hci_dev_lock(hdev);
3406
3407 hdev->io_capability = cp->io_capability;
3408
3409 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3410
3411 hci_dev_unlock(hdev);
3412
3413 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3414 NULL, 0);
3415 }
3416
3417 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3418 {
3419 struct hci_dev *hdev = conn->hdev;
3420 struct mgmt_pending_cmd *cmd;
3421
3422 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3423 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3424 continue;
3425
3426 if (cmd->user_data != conn)
3427 continue;
3428
3429 return cmd;
3430 }
3431
3432 return NULL;
3433 }
3434
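/* Finalize a Pair Device request: report the result, detach the
 * connection callbacks so no further events reach this command, and
 * drop both references (hci_conn_drop() for the connection use,
 * hci_conn_put() for the cmd->user_data reference) taken while pairing.
 */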
3435 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3436 {
3437 struct mgmt_rp_pair_device rp;
3438 struct hci_conn *conn = cmd->user_data;
3439 int err;
3440
3441 bacpy(&rp.addr.bdaddr, &conn->dst);
3442 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3443
3444 err = mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_PAIR_DEVICE,
3445 status, &rp, sizeof(rp));
3446
3447 /* So we don't get further callbacks for this connection */
3448 conn->connect_cfm_cb = NULL;
3449 conn->security_cfm_cb = NULL;
3450 conn->disconn_cfm_cb = NULL;
3451
3452 hci_conn_drop(conn);
3453
3454 /* The device is paired so there is no need to remove
3455 * its connection parameters anymore.
3456 */
3457 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3458
3459 hci_conn_put(conn);
3460
3461 return err;
3462 }
3463
3464 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3465 {
3466 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3467 struct mgmt_pending_cmd *cmd;
3468
3469 cmd = find_pairing(conn);
3470 if (cmd) {
3471 cmd->cmd_complete(cmd, status);
3472 mgmt_pending_remove(cmd);
3473 }
3474 }
3475
3476 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3477 {
3478 struct mgmt_pending_cmd *cmd;
3479
3480 BT_DBG("status %u", status);
3481
3482 cmd = find_pairing(conn);
3483 if (!cmd) {
3484 BT_DBG("Unable to find a pending command");
3485 return;
3486 }
3487
3488 cmd->cmd_complete(cmd, mgmt_status(status));
3489 mgmt_pending_remove(cmd);
3490 }
3491
3492 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3493 {
3494 struct mgmt_pending_cmd *cmd;
3495
3496 BT_DBG("status %u", status);
3497
3498 if (!status)
3499 return;
3500
3501 cmd = find_pairing(conn);
3502 if (!cmd) {
3503 BT_DBG("Unable to find a pending command");
3504 return;
3505 }
3506
3507 cmd->cmd_complete(cmd, mgmt_status(status));
3508 mgmt_pending_remove(cmd);
3509 }
3510
3511 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3512 u16 len)
3513 {
3514 struct mgmt_cp_pair_device *cp = data;
3515 struct mgmt_rp_pair_device rp;
3516 struct mgmt_pending_cmd *cmd;
3517 u8 sec_level, auth_type;
3518 struct hci_conn *conn;
3519 int err;
3520
3521 bt_dev_dbg(hdev, "sock %p", sk);
3522
3523 memset(&rp, 0, sizeof(rp));
3524 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3525 rp.addr.type = cp->addr.type;
3526
3527 if (!bdaddr_type_is_valid(cp->addr.type))
3528 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3529 MGMT_STATUS_INVALID_PARAMS,
3530 &rp, sizeof(rp));
3531
3532 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3533 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3534 MGMT_STATUS_INVALID_PARAMS,
3535 &rp, sizeof(rp));
3536
3537 hci_dev_lock(hdev);
3538
3539 if (!hdev_is_powered(hdev)) {
3540 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3541 MGMT_STATUS_NOT_POWERED, &rp,
3542 sizeof(rp));
3543 goto unlock;
3544 }
3545
3546 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3547 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3548 MGMT_STATUS_ALREADY_PAIRED, &rp,
3549 sizeof(rp));
3550 goto unlock;
3551 }
3552
3553 sec_level = BT_SECURITY_MEDIUM;
3554 auth_type = HCI_AT_DEDICATED_BONDING;
3555
3556 if (cp->addr.type == BDADDR_BREDR) {
3557 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3558 auth_type, CONN_REASON_PAIR_DEVICE,
3559 HCI_ACL_CONN_TIMEOUT);
3560 } else {
3561 u8 addr_type = le_addr_type(cp->addr.type);
3562 struct hci_conn_params *p;
3563
3564 /* When pairing a new device, it is expected to remember
3565 * this device for future connections. Adding the connection
3566 * parameter information ahead of time allows tracking
3567 * of the peripheral preferred values and will speed up any
3568 * further connection establishment.
3569 *
3570 * If connection parameters already exist, then they
3571 * will be kept and this function does nothing.
3572 */
3573 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3574 if (!p) {
3575 err = -EIO;
3576 goto unlock;
3577 }
3578
3579 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3580 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3581
3582 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3583 sec_level, HCI_LE_CONN_TIMEOUT,
3584 CONN_REASON_PAIR_DEVICE);
3585 }
3586
3587 if (IS_ERR(conn)) {
3588 int status;
3589
3590 if (PTR_ERR(conn) == -EBUSY)
3591 status = MGMT_STATUS_BUSY;
3592 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3593 status = MGMT_STATUS_NOT_SUPPORTED;
3594 else if (PTR_ERR(conn) == -ECONNREFUSED)
3595 status = MGMT_STATUS_REJECTED;
3596 else
3597 status = MGMT_STATUS_CONNECT_FAILED;
3598
3599 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3600 status, &rp, sizeof(rp));
3601 goto unlock;
3602 }
3603
3604 if (conn->connect_cfm_cb) {
3605 hci_conn_drop(conn);
3606 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3607 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3608 goto unlock;
3609 }
3610
3611 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3612 if (!cmd) {
3613 err = -ENOMEM;
3614 hci_conn_drop(conn);
3615 goto unlock;
3616 }
3617
3618 cmd->cmd_complete = pairing_complete;
3619
3620 /* For LE, just connecting isn't a proof that the pairing finished */
3621 if (cp->addr.type == BDADDR_BREDR) {
3622 conn->connect_cfm_cb = pairing_complete_cb;
3623 conn->security_cfm_cb = pairing_complete_cb;
3624 conn->disconn_cfm_cb = pairing_complete_cb;
3625 } else {
3626 conn->connect_cfm_cb = le_pairing_complete_cb;
3627 conn->security_cfm_cb = le_pairing_complete_cb;
3628 conn->disconn_cfm_cb = le_pairing_complete_cb;
3629 }
3630
3631 conn->io_capability = cp->io_cap;
3632 cmd->user_data = hci_conn_get(conn);
3633
3634 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3635 hci_conn_security(conn, sec_level, auth_type, true)) {
3636 cmd->cmd_complete(cmd, 0);
3637 mgmt_pending_remove(cmd);
3638 }
3639
3640 err = 0;
3641
3642 unlock:
3643 hci_dev_unlock(hdev);
3644 return err;
3645 }
3646
3647 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3648 u16 len)
3649 {
3650 struct mgmt_addr_info *addr = data;
3651 struct mgmt_pending_cmd *cmd;
3652 struct hci_conn *conn;
3653 int err;
3654
3655 bt_dev_dbg(hdev, "sock %p", sk);
3656
3657 hci_dev_lock(hdev);
3658
3659 if (!hdev_is_powered(hdev)) {
3660 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3661 MGMT_STATUS_NOT_POWERED);
3662 goto unlock;
3663 }
3664
3665 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3666 if (!cmd) {
3667 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3668 MGMT_STATUS_INVALID_PARAMS);
3669 goto unlock;
3670 }
3671
3672 conn = cmd->user_data;
3673
3674 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3675 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3676 MGMT_STATUS_INVALID_PARAMS);
3677 goto unlock;
3678 }
3679
3680 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3681 mgmt_pending_remove(cmd);
3682
3683 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3684 addr, sizeof(*addr));
3685
3686 /* Since the user doesn't want to proceed with the connection, abort any
3687 * ongoing pairing and then terminate the link if it was created
3688 * because of the pair device action.
3689 */
3690 if (addr->type == BDADDR_BREDR)
3691 hci_remove_link_key(hdev, &addr->bdaddr);
3692 else
3693 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3694 le_addr_type(addr->type));
3695
3696 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3697 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3698
3699 unlock:
3700 hci_dev_unlock(hdev);
3701 return err;
3702 }
3703
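/* Common handler for the user confirmation/passkey/PIN reply family:
 * replies for LE addresses are routed to SMP, while for BR/EDR a pending
 * command is created and the corresponding HCI reply opcode is sent to
 * the controller.
 */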
3704 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3705 struct mgmt_addr_info *addr, u16 mgmt_op,
3706 u16 hci_op, __le32 passkey)
3707 {
3708 struct mgmt_pending_cmd *cmd;
3709 struct hci_conn *conn;
3710 int err;
3711
3712 hci_dev_lock(hdev);
3713
3714 if (!hdev_is_powered(hdev)) {
3715 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3716 MGMT_STATUS_NOT_POWERED, addr,
3717 sizeof(*addr));
3718 goto done;
3719 }
3720
3721 if (addr->type == BDADDR_BREDR)
3722 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3723 else
3724 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3725 le_addr_type(addr->type));
3726
3727 if (!conn) {
3728 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3729 MGMT_STATUS_NOT_CONNECTED, addr,
3730 sizeof(*addr));
3731 goto done;
3732 }
3733
3734 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3735 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3736 if (!err)
3737 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3738 MGMT_STATUS_SUCCESS, addr,
3739 sizeof(*addr));
3740 else
3741 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3742 MGMT_STATUS_FAILED, addr,
3743 sizeof(*addr));
3744
3745 goto done;
3746 }
3747
3748 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3749 if (!cmd) {
3750 err = -ENOMEM;
3751 goto done;
3752 }
3753
3754 cmd->cmd_complete = addr_cmd_complete;
3755
3756 /* Continue with pairing via HCI */
3757 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3758 struct hci_cp_user_passkey_reply cp;
3759
3760 bacpy(&cp.bdaddr, &addr->bdaddr);
3761 cp.passkey = passkey;
3762 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3763 } else
3764 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3765 &addr->bdaddr);
3766
3767 if (err < 0)
3768 mgmt_pending_remove(cmd);
3769
3770 done:
3771 hci_dev_unlock(hdev);
3772 return err;
3773 }
3774
3775 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3776 void *data, u16 len)
3777 {
3778 struct mgmt_cp_pin_code_neg_reply *cp = data;
3779
3780 bt_dev_dbg(hdev, "sock %p", sk);
3781
3782 return user_pairing_resp(sk, hdev, &cp->addr,
3783 MGMT_OP_PIN_CODE_NEG_REPLY,
3784 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3785 }
3786
3787 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3788 u16 len)
3789 {
3790 struct mgmt_cp_user_confirm_reply *cp = data;
3791
3792 bt_dev_dbg(hdev, "sock %p", sk);
3793
3794 if (len != sizeof(*cp))
3795 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3796 MGMT_STATUS_INVALID_PARAMS);
3797
3798 return user_pairing_resp(sk, hdev, &cp->addr,
3799 MGMT_OP_USER_CONFIRM_REPLY,
3800 HCI_OP_USER_CONFIRM_REPLY, 0);
3801 }
3802
3803 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3804 void *data, u16 len)
3805 {
3806 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3807
3808 bt_dev_dbg(hdev, "sock %p", sk);
3809
3810 return user_pairing_resp(sk, hdev, &cp->addr,
3811 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3812 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3813 }
3814
3815 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3816 u16 len)
3817 {
3818 struct mgmt_cp_user_passkey_reply *cp = data;
3819
3820 bt_dev_dbg(hdev, "sock %p", sk);
3821
3822 return user_pairing_resp(sk, hdev, &cp->addr,
3823 MGMT_OP_USER_PASSKEY_REPLY,
3824 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3825 }
3826
3827 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3828 void *data, u16 len)
3829 {
3830 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3831
3832 bt_dev_dbg(hdev, "sock %p", sk);
3833
3834 return user_pairing_resp(sk, hdev, &cp->addr,
3835 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3836 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3837 }
3838
3839 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3840 {
3841 struct adv_info *adv_instance;
3842
3843 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3844 if (!adv_instance)
3845 return 0;
3846
3847 /* stop if current instance doesn't need to be changed */
3848 if (!(adv_instance->flags & flags))
3849 return 0;
3850
3851 cancel_adv_timeout(hdev);
3852
3853 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3854 if (!adv_instance)
3855 return 0;
3856
3857 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3858
3859 return 0;
3860 }
3861
3862 static int name_changed_sync(struct hci_dev *hdev, void *data)
3863 {
3864 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3865 }
3866
3867 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3868 {
3869 struct mgmt_pending_cmd *cmd = data;
3870 struct mgmt_cp_set_local_name *cp = cmd->param;
3871 u8 status = mgmt_status(err);
3872
3873 bt_dev_dbg(hdev, "err %d", err);
3874
3875 if (err == -ECANCELED ||
3876 cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3877 return;
3878
3879 if (status) {
3880 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3881 status);
3882 } else {
3883 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3884 cp, sizeof(*cp));
3885
3886 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3887 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3888 }
3889
3890 mgmt_pending_remove(cmd);
3891 }
3892
3893 static int set_name_sync(struct hci_dev *hdev, void *data)
3894 {
3895 if (lmp_bredr_capable(hdev)) {
3896 hci_update_name_sync(hdev);
3897 hci_update_eir_sync(hdev);
3898 }
3899
3900 	/* The name is stored in the scan response data, so there is no
3901 	 * need to update the advertising data here.
3902 	 */
3903 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3904 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3905
3906 return 0;
3907 }
3908
3909 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3910 u16 len)
3911 {
3912 struct mgmt_cp_set_local_name *cp = data;
3913 struct mgmt_pending_cmd *cmd;
3914 int err;
3915
3916 bt_dev_dbg(hdev, "sock %p", sk);
3917
3918 hci_dev_lock(hdev);
3919
3920 /* If the old values are the same as the new ones just return a
3921 * direct command complete event.
3922 */
3923 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3924 !memcmp(hdev->short_name, cp->short_name,
3925 sizeof(hdev->short_name))) {
3926 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3927 data, len);
3928 goto failed;
3929 }
3930
3931 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3932
3933 if (!hdev_is_powered(hdev)) {
3934 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3935
3936 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3937 data, len);
3938 if (err < 0)
3939 goto failed;
3940
3941 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3942 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3943 ext_info_changed(hdev, sk);
3944
3945 goto failed;
3946 }
3947
3948 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3949 if (!cmd)
3950 err = -ENOMEM;
3951 else
3952 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3953 set_name_complete);
3954
3955 if (err < 0) {
3956 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3957 MGMT_STATUS_FAILED);
3958
3959 if (cmd)
3960 mgmt_pending_remove(cmd);
3961
3962 goto failed;
3963 }
3964
3965 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3966
3967 failed:
3968 hci_dev_unlock(hdev);
3969 return err;
3970 }
3971
3972 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3973 {
3974 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3975 }
3976
3977 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3978 u16 len)
3979 {
3980 struct mgmt_cp_set_appearance *cp = data;
3981 u16 appearance;
3982 int err;
3983
3984 bt_dev_dbg(hdev, "sock %p", sk);
3985
3986 if (!lmp_le_capable(hdev))
3987 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3988 MGMT_STATUS_NOT_SUPPORTED);
3989
3990 appearance = le16_to_cpu(cp->appearance);
3991
3992 hci_dev_lock(hdev);
3993
3994 if (hdev->appearance != appearance) {
3995 hdev->appearance = appearance;
3996
3997 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3998 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3999 NULL);
4000
4001 ext_info_changed(hdev, sk);
4002 }
4003
4004 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
4005 0);
4006
4007 hci_dev_unlock(hdev);
4008
4009 return err;
4010 }
4011
4012 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4013 void *data, u16 len)
4014 {
4015 struct mgmt_rp_get_phy_configuration rp;
4016
4017 bt_dev_dbg(hdev, "sock %p", sk);
4018
4019 hci_dev_lock(hdev);
4020
4021 memset(&rp, 0, sizeof(rp));
4022
4023 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
4024 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
4025 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
4026
4027 hci_dev_unlock(hdev);
4028
4029 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
4030 &rp, sizeof(rp));
4031 }
4032
4033 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
4034 {
4035 struct mgmt_ev_phy_configuration_changed ev;
4036
4037 memset(&ev, 0, sizeof(ev));
4038
4039 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
4040
4041 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
4042 sizeof(ev), skip);
4043 }
4044
4045 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
4046 {
4047 struct mgmt_pending_cmd *cmd = data;
4048 struct sk_buff *skb = cmd->skb;
4049 u8 status = mgmt_status(err);
4050
4051 if (err == -ECANCELED ||
4052 cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
4053 return;
4054
4055 if (!status) {
4056 if (!skb)
4057 status = MGMT_STATUS_FAILED;
4058 else if (IS_ERR(skb))
4059 status = mgmt_status(PTR_ERR(skb));
4060 else
4061 status = mgmt_status(skb->data[0]);
4062 }
4063
4064 bt_dev_dbg(hdev, "status %d", status);
4065
4066 if (status) {
4067 mgmt_cmd_status(cmd->sk, hdev->id,
4068 MGMT_OP_SET_PHY_CONFIGURATION, status);
4069 } else {
4070 mgmt_cmd_complete(cmd->sk, hdev->id,
4071 MGMT_OP_SET_PHY_CONFIGURATION, 0,
4072 NULL, 0);
4073
4074 mgmt_phy_configuration_changed(hdev, cmd->sk);
4075 }
4076
4077 if (skb && !IS_ERR(skb))
4078 kfree_skb(skb);
4079
4080 mgmt_pending_remove(cmd);
4081 }
4082
4083 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4084 {
4085 struct mgmt_pending_cmd *cmd = data;
4086 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4087 struct hci_cp_le_set_default_phy cp_phy;
4088 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4089
4090 memset(&cp_phy, 0, sizeof(cp_phy));
4091
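	/* In the LE Set Default PHY command, all_phys bit 0 means the host
	 * has no transmitter PHY preference and bit 1 means no receiver
	 * preference, so each bit is set below when no corresponding TX/RX
	 * PHYs were selected at the MGMT level.
	 */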
4092 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4093 cp_phy.all_phys |= 0x01;
4094
4095 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4096 cp_phy.all_phys |= 0x02;
4097
4098 if (selected_phys & MGMT_PHY_LE_1M_TX)
4099 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4100
4101 if (selected_phys & MGMT_PHY_LE_2M_TX)
4102 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4103
4104 if (selected_phys & MGMT_PHY_LE_CODED_TX)
4105 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4106
4107 if (selected_phys & MGMT_PHY_LE_1M_RX)
4108 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4109
4110 if (selected_phys & MGMT_PHY_LE_2M_RX)
4111 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4112
4113 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4114 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4115
4116 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4117 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4118
4119 return 0;
4120 }
4121
4122 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4123 void *data, u16 len)
4124 {
4125 struct mgmt_cp_set_phy_configuration *cp = data;
4126 struct mgmt_pending_cmd *cmd;
4127 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4128 u16 pkt_type = (HCI_DH1 | HCI_DM1);
4129 bool changed = false;
4130 int err;
4131
4132 bt_dev_dbg(hdev, "sock %p", sk);
4133
4134 configurable_phys = get_configurable_phys(hdev);
4135 supported_phys = get_supported_phys(hdev);
4136 selected_phys = __le32_to_cpu(cp->selected_phys);
4137
4138 if (selected_phys & ~supported_phys)
4139 return mgmt_cmd_status(sk, hdev->id,
4140 MGMT_OP_SET_PHY_CONFIGURATION,
4141 MGMT_STATUS_INVALID_PARAMS);
4142
4143 unconfigure_phys = supported_phys & ~configurable_phys;
4144
4145 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4146 return mgmt_cmd_status(sk, hdev->id,
4147 MGMT_OP_SET_PHY_CONFIGURATION,
4148 MGMT_STATUS_INVALID_PARAMS);
4149
4150 if (selected_phys == get_selected_phys(hdev))
4151 return mgmt_cmd_complete(sk, hdev->id,
4152 MGMT_OP_SET_PHY_CONFIGURATION,
4153 0, NULL, 0);
4154
4155 hci_dev_lock(hdev);
4156
4157 if (!hdev_is_powered(hdev)) {
4158 err = mgmt_cmd_status(sk, hdev->id,
4159 MGMT_OP_SET_PHY_CONFIGURATION,
4160 MGMT_STATUS_REJECTED);
4161 goto unlock;
4162 }
4163
4164 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4165 err = mgmt_cmd_status(sk, hdev->id,
4166 MGMT_OP_SET_PHY_CONFIGURATION,
4167 MGMT_STATUS_BUSY);
4168 goto unlock;
4169 }
4170
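	/* Note the inverted logic below: the basic rate (DM/DH) bits in
	 * pkt_type enable packet types, while the EDR (2-DHx/3-DHx) bits
	 * mark packet types that shall NOT be used, so an EDR bit is
	 * cleared to enable that rate and set to disable it.
	 */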
4171 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4172 pkt_type |= (HCI_DH3 | HCI_DM3);
4173 else
4174 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4175
4176 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4177 pkt_type |= (HCI_DH5 | HCI_DM5);
4178 else
4179 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4180
4181 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4182 pkt_type &= ~HCI_2DH1;
4183 else
4184 pkt_type |= HCI_2DH1;
4185
4186 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4187 pkt_type &= ~HCI_2DH3;
4188 else
4189 pkt_type |= HCI_2DH3;
4190
4191 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4192 pkt_type &= ~HCI_2DH5;
4193 else
4194 pkt_type |= HCI_2DH5;
4195
4196 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4197 pkt_type &= ~HCI_3DH1;
4198 else
4199 pkt_type |= HCI_3DH1;
4200
4201 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4202 pkt_type &= ~HCI_3DH3;
4203 else
4204 pkt_type |= HCI_3DH3;
4205
4206 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4207 pkt_type &= ~HCI_3DH5;
4208 else
4209 pkt_type |= HCI_3DH5;
4210
4211 if (pkt_type != hdev->pkt_type) {
4212 hdev->pkt_type = pkt_type;
4213 changed = true;
4214 }
4215
4216 if ((selected_phys & MGMT_PHY_LE_MASK) ==
4217 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4218 if (changed)
4219 mgmt_phy_configuration_changed(hdev, sk);
4220
4221 err = mgmt_cmd_complete(sk, hdev->id,
4222 MGMT_OP_SET_PHY_CONFIGURATION,
4223 0, NULL, 0);
4224
4225 goto unlock;
4226 }
4227
4228 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4229 len);
4230 if (!cmd)
4231 err = -ENOMEM;
4232 else
4233 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4234 set_default_phy_complete);
4235
4236 if (err < 0) {
4237 err = mgmt_cmd_status(sk, hdev->id,
4238 MGMT_OP_SET_PHY_CONFIGURATION,
4239 MGMT_STATUS_FAILED);
4240
4241 if (cmd)
4242 mgmt_pending_remove(cmd);
4243 }
4244
4245 unlock:
4246 hci_dev_unlock(hdev);
4247
4248 return err;
4249 }
4250
4251 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4252 u16 len)
4253 {
4254 int err = MGMT_STATUS_SUCCESS;
4255 struct mgmt_cp_set_blocked_keys *keys = data;
4256 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4257 sizeof(struct mgmt_blocked_key_info));
4258 u16 key_count, expected_len;
4259 int i;
4260
4261 bt_dev_dbg(hdev, "sock %p", sk);
4262
4263 key_count = __le16_to_cpu(keys->key_count);
4264 if (key_count > max_key_count) {
4265 bt_dev_err(hdev, "too big key_count value %u", key_count);
4266 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4267 MGMT_STATUS_INVALID_PARAMS);
4268 }
4269
4270 expected_len = struct_size(keys, keys, key_count);
4271 if (expected_len != len) {
4272 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4273 expected_len, len);
4274 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4275 MGMT_STATUS_INVALID_PARAMS);
4276 }
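	/* The payload is a 16-bit little-endian key_count followed by
	 * key_count fixed-size mgmt_blocked_key_info entries, which is
	 * exactly what struct_size() computed above.
	 */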
4277
4278 hci_dev_lock(hdev);
4279
4280 hci_blocked_keys_clear(hdev);
4281
4282 for (i = 0; i < key_count; ++i) {
4283 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4284
4285 if (!b) {
4286 err = MGMT_STATUS_NO_RESOURCES;
4287 break;
4288 }
4289
4290 b->type = keys->keys[i].type;
4291 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4292 list_add_rcu(&b->list, &hdev->blocked_keys);
4293 }
4294 hci_dev_unlock(hdev);
4295
4296 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4297 err, NULL, 0);
4298 }
4299
4300 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4301 void *data, u16 len)
4302 {
4303 struct mgmt_mode *cp = data;
4304 int err;
4305 bool changed = false;
4306
4307 bt_dev_dbg(hdev, "sock %p", sk);
4308
4309 if (!hci_test_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED))
4310 return mgmt_cmd_status(sk, hdev->id,
4311 MGMT_OP_SET_WIDEBAND_SPEECH,
4312 MGMT_STATUS_NOT_SUPPORTED);
4313
4314 if (cp->val != 0x00 && cp->val != 0x01)
4315 return mgmt_cmd_status(sk, hdev->id,
4316 MGMT_OP_SET_WIDEBAND_SPEECH,
4317 MGMT_STATUS_INVALID_PARAMS);
4318
4319 hci_dev_lock(hdev);
4320
4321 if (hdev_is_powered(hdev) &&
4322 !!cp->val != hci_dev_test_flag(hdev,
4323 HCI_WIDEBAND_SPEECH_ENABLED)) {
4324 err = mgmt_cmd_status(sk, hdev->id,
4325 MGMT_OP_SET_WIDEBAND_SPEECH,
4326 MGMT_STATUS_REJECTED);
4327 goto unlock;
4328 }
4329
4330 if (cp->val)
4331 changed = !hci_dev_test_and_set_flag(hdev,
4332 HCI_WIDEBAND_SPEECH_ENABLED);
4333 else
4334 changed = hci_dev_test_and_clear_flag(hdev,
4335 HCI_WIDEBAND_SPEECH_ENABLED);
4336
4337 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4338 if (err < 0)
4339 goto unlock;
4340
4341 if (changed)
4342 err = new_settings(hdev, sk);
4343
4344 unlock:
4345 hci_dev_unlock(hdev);
4346 return err;
4347 }
4348
4349 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4350 void *data, u16 data_len)
4351 {
4352 char buf[20];
4353 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4354 u16 cap_len = 0;
4355 u8 flags = 0;
4356 u8 tx_power_range[2];
4357
4358 bt_dev_dbg(hdev, "sock %p", sk);
4359
4360 	memset(buf, 0, sizeof(buf));
4361
4362 hci_dev_lock(hdev);
4363
4364 	/* When the Read Simple Pairing Options command is supported,
4365 	 * remote public key validation is supported.
4366 *
4367 * Alternatively, when Microsoft extensions are available, they can
4368 * indicate support for public key validation as well.
4369 */
4370 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4371 flags |= 0x01; /* Remote public key validation (BR/EDR) */
4372
4373 flags |= 0x02; /* Remote public key validation (LE) */
4374
4375 	/* When the Read Encryption Key Size command is supported, the
4376 	 * encryption key size is enforced.
4377 */
4378 if (hdev->commands[20] & 0x10)
4379 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
4380
4381 flags |= 0x08; /* Encryption key size enforcement (LE) */
4382
4383 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4384 &flags, 1);
4385
4386 	/* When the Read Simple Pairing Options command is supported, the
4387 	 * maximum encryption key size information is also provided.
4388 */
4389 if (hdev->commands[41] & 0x08)
4390 cap_len = eir_append_le16(rp->cap, cap_len,
4391 MGMT_CAP_MAX_ENC_KEY_SIZE,
4392 hdev->max_enc_key_size);
4393
4394 cap_len = eir_append_le16(rp->cap, cap_len,
4395 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4396 SMP_MAX_ENC_KEY_SIZE);
4397
4398 	/* Append the min/max LE tx power parameters if we were able to
4399 	 * fetch them from the controller.
4400 */
4401 if (hdev->commands[38] & 0x80) {
4402 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4403 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4404 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4405 tx_power_range, 2);
4406 }
4407
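	/* The capability list above uses the EIR TLV encoding produced by
	 * the eir_append_* helpers: a length byte, a type byte and then
	 * the value itself.
	 */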
4408 rp->cap_len = cpu_to_le16(cap_len);
4409
4410 hci_dev_unlock(hdev);
4411
4412 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4413 rp, sizeof(*rp) + cap_len);
4414 }
4415
4416 #ifdef CONFIG_BT_FEATURE_DEBUG
4417 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4418 static const u8 debug_uuid[16] = {
4419 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4420 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4421 };
4422 #endif
4423
4424 /* 330859bc-7506-492d-9370-9a6f0614037f */
4425 static const u8 quality_report_uuid[16] = {
4426 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4427 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4428 };
4429
4430 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4431 static const u8 offload_codecs_uuid[16] = {
4432 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4433 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4434 };
4435
4436 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4437 static const u8 le_simultaneous_roles_uuid[16] = {
4438 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4439 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4440 };
4441
4442 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4443 static const u8 iso_socket_uuid[16] = {
4444 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4445 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4446 };
4447
4448 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4449 static const u8 mgmt_mesh_uuid[16] = {
4450 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4451 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4452 };
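/* The UUID byte arrays above are stored in little-endian order, i.e.
 * byte-reversed relative to the string form given in each comment.
 */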
4453
4454 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4455 void *data, u16 data_len)
4456 {
4457 struct mgmt_rp_read_exp_features_info *rp;
4458 size_t len;
4459 u16 idx = 0;
4460 u32 flags;
4461 int status;
4462
4463 bt_dev_dbg(hdev, "sock %p", sk);
4464
4465 	/* Enough space for 7 features, more than are currently set below */
4466 len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4467 rp = kzalloc(len, GFP_KERNEL);
4468 if (!rp)
4469 return -ENOMEM;
4470
4471 #ifdef CONFIG_BT_FEATURE_DEBUG
4472 if (!hdev) {
4473 flags = bt_dbg_get() ? BIT(0) : 0;
4474
4475 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4476 rp->features[idx].flags = cpu_to_le32(flags);
4477 idx++;
4478 }
4479 #endif
4480
4481 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4482 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4483 flags = BIT(0);
4484 else
4485 flags = 0;
4486
4487 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4488 rp->features[idx].flags = cpu_to_le32(flags);
4489 idx++;
4490 }
4491
4492 if (hdev && (aosp_has_quality_report(hdev) ||
4493 hdev->set_quality_report)) {
4494 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4495 flags = BIT(0);
4496 else
4497 flags = 0;
4498
4499 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4500 rp->features[idx].flags = cpu_to_le32(flags);
4501 idx++;
4502 }
4503
4504 if (hdev && hdev->get_data_path_id) {
4505 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4506 flags = BIT(0);
4507 else
4508 flags = 0;
4509
4510 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4511 rp->features[idx].flags = cpu_to_le32(flags);
4512 idx++;
4513 }
4514
4515 if (IS_ENABLED(CONFIG_BT_LE)) {
4516 flags = iso_enabled() ? BIT(0) : 0;
4517 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4518 rp->features[idx].flags = cpu_to_le32(flags);
4519 idx++;
4520 }
4521
4522 if (hdev && lmp_le_capable(hdev)) {
4523 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4524 flags = BIT(0);
4525 else
4526 flags = 0;
4527
4528 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4529 rp->features[idx].flags = cpu_to_le32(flags);
4530 idx++;
4531 }
4532
4533 rp->feature_count = cpu_to_le16(idx);
4534
4535 /* After reading the experimental features information, enable
4536 	 * the events to update the client on any future change.
4537 */
4538 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4539
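	/* Each returned feature entry is 20 bytes: a 16 byte UUID followed
	 * by a 32-bit little-endian flags word, hence the 20 * idx below.
	 */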
4540 status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4541 MGMT_OP_READ_EXP_FEATURES_INFO,
4542 0, rp, sizeof(*rp) + (20 * idx));
4543
4544 kfree(rp);
4545 return status;
4546 }
4547
4548 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4549 bool enabled, struct sock *skip)
4550 {
4551 struct mgmt_ev_exp_feature_changed ev;
4552
4553 memset(&ev, 0, sizeof(ev));
4554 memcpy(ev.uuid, uuid, 16);
4555 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4556
4557 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4558 &ev, sizeof(ev),
4559 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4560 }
4561
4562 #define EXP_FEAT(_uuid, _set_func) \
4563 { \
4564 .uuid = _uuid, \
4565 .set_func = _set_func, \
4566 }
4567
4568 /* The zero key uuid is special. Multiple exp features are set through it. */
4569 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4570 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4571 {
4572 struct mgmt_rp_set_exp_feature rp;
4573
4574 memset(rp.uuid, 0, 16);
4575 rp.flags = cpu_to_le32(0);
4576
4577 #ifdef CONFIG_BT_FEATURE_DEBUG
4578 if (!hdev) {
4579 bool changed = bt_dbg_get();
4580
4581 bt_dbg_set(false);
4582
4583 if (changed)
4584 exp_feature_changed(NULL, ZERO_KEY, false, sk);
4585 }
4586 #endif
4587
4588 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4589
4590 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4591 MGMT_OP_SET_EXP_FEATURE, 0,
4592 &rp, sizeof(rp));
4593 }
4594
4595 #ifdef CONFIG_BT_FEATURE_DEBUG
4596 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4597 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4598 {
4599 struct mgmt_rp_set_exp_feature rp;
4600
4601 bool val, changed;
4602 int err;
4603
4604 	/* The command requires the non-controller index */
4605 if (hdev)
4606 return mgmt_cmd_status(sk, hdev->id,
4607 MGMT_OP_SET_EXP_FEATURE,
4608 MGMT_STATUS_INVALID_INDEX);
4609
4610 /* Parameters are limited to a single octet */
4611 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4612 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4613 MGMT_OP_SET_EXP_FEATURE,
4614 MGMT_STATUS_INVALID_PARAMS);
4615
4616 /* Only boolean on/off is supported */
4617 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4618 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4619 MGMT_OP_SET_EXP_FEATURE,
4620 MGMT_STATUS_INVALID_PARAMS);
4621
4622 val = !!cp->param[0];
4623 changed = val ? !bt_dbg_get() : bt_dbg_get();
4624 bt_dbg_set(val);
4625
4626 memcpy(rp.uuid, debug_uuid, 16);
4627 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4628
4629 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4630
4631 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4632 MGMT_OP_SET_EXP_FEATURE, 0,
4633 &rp, sizeof(rp));
4634
4635 if (changed)
4636 exp_feature_changed(hdev, debug_uuid, val, sk);
4637
4638 return err;
4639 }
4640 #endif
4641
4642 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4643 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4644 {
4645 struct mgmt_rp_set_exp_feature rp;
4646 bool val, changed;
4647 int err;
4648
4649 	/* The command requires the controller index */
4650 if (!hdev)
4651 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4652 MGMT_OP_SET_EXP_FEATURE,
4653 MGMT_STATUS_INVALID_INDEX);
4654
4655 /* Parameters are limited to a single octet */
4656 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4657 return mgmt_cmd_status(sk, hdev->id,
4658 MGMT_OP_SET_EXP_FEATURE,
4659 MGMT_STATUS_INVALID_PARAMS);
4660
4661 /* Only boolean on/off is supported */
4662 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4663 return mgmt_cmd_status(sk, hdev->id,
4664 MGMT_OP_SET_EXP_FEATURE,
4665 MGMT_STATUS_INVALID_PARAMS);
4666
4667 val = !!cp->param[0];
4668
4669 if (val) {
4670 changed = !hci_dev_test_and_set_flag(hdev,
4671 HCI_MESH_EXPERIMENTAL);
4672 } else {
4673 hci_dev_clear_flag(hdev, HCI_MESH);
4674 changed = hci_dev_test_and_clear_flag(hdev,
4675 HCI_MESH_EXPERIMENTAL);
4676 }
4677
4678 memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4679 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4680
4681 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4682
4683 err = mgmt_cmd_complete(sk, hdev->id,
4684 MGMT_OP_SET_EXP_FEATURE, 0,
4685 &rp, sizeof(rp));
4686
4687 if (changed)
4688 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4689
4690 return err;
4691 }
4692
4693 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4694 struct mgmt_cp_set_exp_feature *cp,
4695 u16 data_len)
4696 {
4697 struct mgmt_rp_set_exp_feature rp;
4698 bool val, changed;
4699 int err;
4700
4701 	/* The command requires a valid controller index */
4702 if (!hdev)
4703 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4704 MGMT_OP_SET_EXP_FEATURE,
4705 MGMT_STATUS_INVALID_INDEX);
4706
4707 /* Parameters are limited to a single octet */
4708 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4709 return mgmt_cmd_status(sk, hdev->id,
4710 MGMT_OP_SET_EXP_FEATURE,
4711 MGMT_STATUS_INVALID_PARAMS);
4712
4713 /* Only boolean on/off is supported */
4714 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4715 return mgmt_cmd_status(sk, hdev->id,
4716 MGMT_OP_SET_EXP_FEATURE,
4717 MGMT_STATUS_INVALID_PARAMS);
4718
4719 hci_req_sync_lock(hdev);
4720
4721 val = !!cp->param[0];
4722 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4723
4724 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4725 err = mgmt_cmd_status(sk, hdev->id,
4726 MGMT_OP_SET_EXP_FEATURE,
4727 MGMT_STATUS_NOT_SUPPORTED);
4728 goto unlock_quality_report;
4729 }
4730
4731 if (changed) {
4732 if (hdev->set_quality_report)
4733 err = hdev->set_quality_report(hdev, val);
4734 else
4735 err = aosp_set_quality_report(hdev, val);
4736
4737 if (err) {
4738 err = mgmt_cmd_status(sk, hdev->id,
4739 MGMT_OP_SET_EXP_FEATURE,
4740 MGMT_STATUS_FAILED);
4741 goto unlock_quality_report;
4742 }
4743
4744 if (val)
4745 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4746 else
4747 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4748 }
4749
4750 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4751
4752 memcpy(rp.uuid, quality_report_uuid, 16);
4753 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4754 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4755
4756 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4757 &rp, sizeof(rp));
4758
4759 if (changed)
4760 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4761
4762 unlock_quality_report:
4763 hci_req_sync_unlock(hdev);
4764 return err;
4765 }
4766
4767 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4768 struct mgmt_cp_set_exp_feature *cp,
4769 u16 data_len)
4770 {
4771 bool val, changed;
4772 int err;
4773 struct mgmt_rp_set_exp_feature rp;
4774
4775 	/* The command requires a valid controller index */
4776 if (!hdev)
4777 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4778 MGMT_OP_SET_EXP_FEATURE,
4779 MGMT_STATUS_INVALID_INDEX);
4780
4781 /* Parameters are limited to a single octet */
4782 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4783 return mgmt_cmd_status(sk, hdev->id,
4784 MGMT_OP_SET_EXP_FEATURE,
4785 MGMT_STATUS_INVALID_PARAMS);
4786
4787 /* Only boolean on/off is supported */
4788 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4789 return mgmt_cmd_status(sk, hdev->id,
4790 MGMT_OP_SET_EXP_FEATURE,
4791 MGMT_STATUS_INVALID_PARAMS);
4792
4793 val = !!cp->param[0];
4794 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4795
4796 if (!hdev->get_data_path_id) {
4797 return mgmt_cmd_status(sk, hdev->id,
4798 MGMT_OP_SET_EXP_FEATURE,
4799 MGMT_STATUS_NOT_SUPPORTED);
4800 }
4801
4802 if (changed) {
4803 if (val)
4804 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4805 else
4806 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4807 }
4808
4809 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4810 val, changed);
4811
4812 memcpy(rp.uuid, offload_codecs_uuid, 16);
4813 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4814 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4815 err = mgmt_cmd_complete(sk, hdev->id,
4816 MGMT_OP_SET_EXP_FEATURE, 0,
4817 &rp, sizeof(rp));
4818
4819 if (changed)
4820 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4821
4822 return err;
4823 }
4824
4825 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4826 struct mgmt_cp_set_exp_feature *cp,
4827 u16 data_len)
4828 {
4829 bool val, changed;
4830 int err;
4831 struct mgmt_rp_set_exp_feature rp;
4832
4833 	/* The command requires a valid controller index */
4834 if (!hdev)
4835 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4836 MGMT_OP_SET_EXP_FEATURE,
4837 MGMT_STATUS_INVALID_INDEX);
4838
4839 /* Parameters are limited to a single octet */
4840 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4841 return mgmt_cmd_status(sk, hdev->id,
4842 MGMT_OP_SET_EXP_FEATURE,
4843 MGMT_STATUS_INVALID_PARAMS);
4844
4845 /* Only boolean on/off is supported */
4846 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4847 return mgmt_cmd_status(sk, hdev->id,
4848 MGMT_OP_SET_EXP_FEATURE,
4849 MGMT_STATUS_INVALID_PARAMS);
4850
4851 val = !!cp->param[0];
4852 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4853
4854 if (!hci_dev_le_state_simultaneous(hdev)) {
4855 return mgmt_cmd_status(sk, hdev->id,
4856 MGMT_OP_SET_EXP_FEATURE,
4857 MGMT_STATUS_NOT_SUPPORTED);
4858 }
4859
4860 if (changed) {
4861 if (val)
4862 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4863 else
4864 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4865 }
4866
4867 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4868 val, changed);
4869
4870 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4871 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4872 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4873 err = mgmt_cmd_complete(sk, hdev->id,
4874 MGMT_OP_SET_EXP_FEATURE, 0,
4875 &rp, sizeof(rp));
4876
4877 if (changed)
4878 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4879
4880 return err;
4881 }
4882
4883 #ifdef CONFIG_BT_LE
4884 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4885 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4886 {
4887 struct mgmt_rp_set_exp_feature rp;
4888 bool val, changed = false;
4889 int err;
4890
4891 	/* The command requires the non-controller index */
4892 if (hdev)
4893 return mgmt_cmd_status(sk, hdev->id,
4894 MGMT_OP_SET_EXP_FEATURE,
4895 MGMT_STATUS_INVALID_INDEX);
4896
4897 /* Parameters are limited to a single octet */
4898 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4899 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4900 MGMT_OP_SET_EXP_FEATURE,
4901 MGMT_STATUS_INVALID_PARAMS);
4902
4903 /* Only boolean on/off is supported */
4904 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4905 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4906 MGMT_OP_SET_EXP_FEATURE,
4907 MGMT_STATUS_INVALID_PARAMS);
4908
4909 	val = !!cp->param[0];
4910 if (val)
4911 err = iso_init();
4912 else
4913 err = iso_exit();
4914
4915 if (!err)
4916 changed = true;
4917
4918 memcpy(rp.uuid, iso_socket_uuid, 16);
4919 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4920
4921 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4922
4923 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4924 MGMT_OP_SET_EXP_FEATURE, 0,
4925 &rp, sizeof(rp));
4926
4927 if (changed)
4928 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4929
4930 return err;
4931 }
4932 #endif
4933
4934 static const struct mgmt_exp_feature {
4935 const u8 *uuid;
4936 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4937 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4938 } exp_features[] = {
4939 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4940 #ifdef CONFIG_BT_FEATURE_DEBUG
4941 EXP_FEAT(debug_uuid, set_debug_func),
4942 #endif
4943 EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
4944 EXP_FEAT(quality_report_uuid, set_quality_report_func),
4945 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
4946 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
4947 #ifdef CONFIG_BT_LE
4948 EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
4949 #endif
4950
4951 /* end with a null feature */
4952 EXP_FEAT(NULL, NULL)
4953 };
4954
4955 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4956 void *data, u16 data_len)
4957 {
4958 struct mgmt_cp_set_exp_feature *cp = data;
4959 size_t i = 0;
4960
4961 bt_dev_dbg(hdev, "sock %p", sk);
4962
4963 for (i = 0; exp_features[i].uuid; i++) {
4964 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4965 return exp_features[i].set_func(sk, hdev, cp, data_len);
4966 }
4967
4968 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4969 MGMT_OP_SET_EXP_FEATURE,
4970 MGMT_STATUS_NOT_SUPPORTED);
4971 }
4972
4973 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4974 u16 data_len)
4975 {
4976 struct mgmt_cp_get_device_flags *cp = data;
4977 struct mgmt_rp_get_device_flags rp;
4978 struct bdaddr_list_with_flags *br_params;
4979 struct hci_conn_params *params;
4980 u32 supported_flags;
4981 u32 current_flags = 0;
4982 u8 status = MGMT_STATUS_INVALID_PARAMS;
4983
4984 	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
4985 &cp->addr.bdaddr, cp->addr.type);
4986
4987 hci_dev_lock(hdev);
4988
4989 supported_flags = hdev->conn_flags;
4990
4991 memset(&rp, 0, sizeof(rp));
4992
4993 if (cp->addr.type == BDADDR_BREDR) {
4994 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4995 &cp->addr.bdaddr,
4996 cp->addr.type);
4997 if (!br_params)
4998 goto done;
4999
5000 current_flags = br_params->flags;
5001 } else {
5002 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5003 le_addr_type(cp->addr.type));
5004 if (!params)
5005 goto done;
5006
5007 current_flags = params->flags;
5008 }
5009
5010 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5011 rp.addr.type = cp->addr.type;
5012 rp.supported_flags = cpu_to_le32(supported_flags);
5013 rp.current_flags = cpu_to_le32(current_flags);
5014
5015 status = MGMT_STATUS_SUCCESS;
5016
5017 done:
5018 hci_dev_unlock(hdev);
5019
5020 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5021 &rp, sizeof(rp));
5022 }
5023
5024 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5025 bdaddr_t *bdaddr, u8 bdaddr_type,
5026 u32 supported_flags, u32 current_flags)
5027 {
5028 struct mgmt_ev_device_flags_changed ev;
5029
5030 bacpy(&ev.addr.bdaddr, bdaddr);
5031 ev.addr.type = bdaddr_type;
5032 ev.supported_flags = cpu_to_le32(supported_flags);
5033 ev.current_flags = cpu_to_le32(current_flags);
5034
5035 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5036 }
5037
5038 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5039 u16 len)
5040 {
5041 struct mgmt_cp_set_device_flags *cp = data;
5042 struct bdaddr_list_with_flags *br_params;
5043 struct hci_conn_params *params;
5044 u8 status = MGMT_STATUS_INVALID_PARAMS;
5045 u32 supported_flags;
5046 u32 current_flags = __le32_to_cpu(cp->current_flags);
5047
5048 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5049 &cp->addr.bdaddr, cp->addr.type, current_flags);
5050
5051 	/* conn_flags may change; it is re-read under hci_dev_lock() below */
5052 supported_flags = hdev->conn_flags;
5053
5054 if ((supported_flags | current_flags) != supported_flags) {
5055 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5056 current_flags, supported_flags);
5057 goto done;
5058 }
5059
5060 hci_dev_lock(hdev);
5061
5062 if (cp->addr.type == BDADDR_BREDR) {
5063 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5064 &cp->addr.bdaddr,
5065 cp->addr.type);
5066
5067 if (br_params) {
5068 br_params->flags = current_flags;
5069 status = MGMT_STATUS_SUCCESS;
5070 } else {
5071 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5072 &cp->addr.bdaddr, cp->addr.type);
5073 }
5074
5075 goto unlock;
5076 }
5077
5078 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5079 le_addr_type(cp->addr.type));
5080 if (!params) {
5081 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5082 &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5083 goto unlock;
5084 }
5085
5086 supported_flags = hdev->conn_flags;
5087
5088 if ((supported_flags | current_flags) != supported_flags) {
5089 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5090 current_flags, supported_flags);
5091 goto unlock;
5092 }
5093
5094 WRITE_ONCE(params->flags, current_flags);
5095 status = MGMT_STATUS_SUCCESS;
5096
5097 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5098 * has been set.
5099 */
5100 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5101 hci_update_passive_scan(hdev);
5102
5103 unlock:
5104 hci_dev_unlock(hdev);
5105
5106 done:
5107 if (status == MGMT_STATUS_SUCCESS)
5108 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5109 supported_flags, current_flags);
5110
5111 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5112 &cp->addr, sizeof(cp->addr));
5113 }
5114
5115 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5116 u16 handle)
5117 {
5118 struct mgmt_ev_adv_monitor_added ev;
5119
5120 ev.monitor_handle = cpu_to_le16(handle);
5121
5122 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5123 }
5124
5125 static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
5126 __le16 handle)
5127 {
5128 struct mgmt_ev_adv_monitor_removed ev;
5129
5130 ev.monitor_handle = handle;
5131
5132 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
5133 }
5134
5135 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5136 void *data, u16 len)
5137 {
5138 struct adv_monitor *monitor = NULL;
5139 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5140 int handle, err;
5141 size_t rp_size = 0;
5142 __u32 supported = 0;
5143 __u32 enabled = 0;
5144 __u16 num_handles = 0;
5145 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5146
5147 BT_DBG("request for %s", hdev->name);
5148
5149 hci_dev_lock(hdev);
5150
5151 if (msft_monitor_supported(hdev))
5152 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5153
5154 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5155 handles[num_handles++] = monitor->handle;
5156
5157 hci_dev_unlock(hdev);
5158
5159 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5160 rp = kmalloc(rp_size, GFP_KERNEL);
5161 if (!rp)
5162 return -ENOMEM;
5163
5164 /* All supported features are currently enabled */
5165 enabled = supported;
5166
5167 rp->supported_features = cpu_to_le32(supported);
5168 rp->enabled_features = cpu_to_le32(enabled);
5169 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5170 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5171 rp->num_handles = cpu_to_le16(num_handles);
5172 if (num_handles)
5173 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5174
5175 err = mgmt_cmd_complete(sk, hdev->id,
5176 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5177 MGMT_STATUS_SUCCESS, rp, rp_size);
5178
5179 kfree(rp);
5180
5181 return err;
5182 }
5183
5184 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5185 void *data, int status)
5186 {
5187 struct mgmt_rp_add_adv_patterns_monitor rp;
5188 struct mgmt_pending_cmd *cmd = data;
5189 struct adv_monitor *monitor = cmd->user_data;
5190
5191 hci_dev_lock(hdev);
5192
5193 rp.monitor_handle = cpu_to_le16(monitor->handle);
5194
5195 if (!status) {
5196 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5197 hdev->adv_monitors_cnt++;
5198 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5199 monitor->state = ADV_MONITOR_STATE_REGISTERED;
5200 hci_update_passive_scan(hdev);
5201 }
5202
5203 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
5204 mgmt_status(status), &rp, sizeof(rp));
5205 mgmt_pending_remove(cmd);
5206
5207 hci_dev_unlock(hdev);
5208 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5209 rp.monitor_handle, status);
5210 }
5211
5212 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5213 {
5214 struct mgmt_pending_cmd *cmd = data;
5215 struct adv_monitor *monitor = cmd->user_data;
5216
5217 return hci_add_adv_monitor(hdev, monitor);
5218 }
5219
5220 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5221 struct adv_monitor *m, u8 status,
5222 void *data, u16 len, u16 op)
5223 {
5224 struct mgmt_pending_cmd *cmd;
5225 int err;
5226
5227 hci_dev_lock(hdev);
5228
5229 if (status)
5230 goto unlock;
5231
5232 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5233 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5234 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5235 status = MGMT_STATUS_BUSY;
5236 goto unlock;
5237 }
5238
5239 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5240 if (!cmd) {
5241 status = MGMT_STATUS_NO_RESOURCES;
5242 goto unlock;
5243 }
5244
5245 cmd->user_data = m;
5246 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5247 mgmt_add_adv_patterns_monitor_complete);
5248 if (err) {
5249 if (err == -ENOMEM)
5250 status = MGMT_STATUS_NO_RESOURCES;
5251 else
5252 status = MGMT_STATUS_FAILED;
5253
5254 goto unlock;
5255 }
5256
5257 hci_dev_unlock(hdev);
5258
5259 return 0;
5260
5261 unlock:
5262 hci_free_adv_monitor(hdev, m);
5263 hci_dev_unlock(hdev);
5264 return mgmt_cmd_status(sk, hdev->id, op, status);
5265 }
5266
5267 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5268 struct mgmt_adv_rssi_thresholds *rssi)
5269 {
5270 if (rssi) {
5271 m->rssi.low_threshold = rssi->low_threshold;
5272 m->rssi.low_threshold_timeout =
5273 __le16_to_cpu(rssi->low_threshold_timeout);
5274 m->rssi.high_threshold = rssi->high_threshold;
5275 m->rssi.high_threshold_timeout =
5276 __le16_to_cpu(rssi->high_threshold_timeout);
5277 m->rssi.sampling_period = rssi->sampling_period;
5278 } else {
5279 		/* Default values. These numbers are the least constraining
5280 		 * parameters for the MSFT API to work, so it behaves as if
5281 		 * there were no RSSI parameters to consider. They may need to
5282 		 * be changed if other APIs are to be supported.
5283 		 */
5284 m->rssi.low_threshold = -127;
5285 m->rssi.low_threshold_timeout = 60;
5286 m->rssi.high_threshold = -127;
5287 m->rssi.high_threshold_timeout = 0;
5288 m->rssi.sampling_period = 0;
5289 }
5290 }
5291
5292 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5293 struct mgmt_adv_pattern *patterns)
5294 {
5295 u8 offset = 0, length = 0;
5296 struct adv_pattern *p = NULL;
5297 int i;
5298
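	/* Each pattern must fit entirely within the maximum extended
	 * advertising data length, so offset, length and their sum are all
	 * checked against HCI_MAX_EXT_AD_LENGTH below.
	 */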
5299 for (i = 0; i < pattern_count; i++) {
5300 offset = patterns[i].offset;
5301 length = patterns[i].length;
5302 if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5303 length > HCI_MAX_EXT_AD_LENGTH ||
5304 (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5305 return MGMT_STATUS_INVALID_PARAMS;
5306
5307 p = kmalloc(sizeof(*p), GFP_KERNEL);
5308 if (!p)
5309 return MGMT_STATUS_NO_RESOURCES;
5310
5311 p->ad_type = patterns[i].ad_type;
5312 p->offset = patterns[i].offset;
5313 p->length = patterns[i].length;
5314 memcpy(p->value, patterns[i].value, p->length);
5315
5316 INIT_LIST_HEAD(&p->list);
5317 list_add(&p->list, &m->patterns);
5318 }
5319
5320 return MGMT_STATUS_SUCCESS;
5321 }
5322
5323 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5324 void *data, u16 len)
5325 {
5326 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5327 struct adv_monitor *m = NULL;
5328 u8 status = MGMT_STATUS_SUCCESS;
5329 size_t expected_size = sizeof(*cp);
5330
5331 BT_DBG("request for %s", hdev->name);
5332
5333 if (len <= sizeof(*cp)) {
5334 status = MGMT_STATUS_INVALID_PARAMS;
5335 goto done;
5336 }
5337
5338 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5339 if (len != expected_size) {
5340 status = MGMT_STATUS_INVALID_PARAMS;
5341 goto done;
5342 }
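	/* The fixed command header is followed by pattern_count
	 * mgmt_adv_pattern entries, which expected_size accounts for.
	 */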
5343
5344 m = kzalloc(sizeof(*m), GFP_KERNEL);
5345 if (!m) {
5346 status = MGMT_STATUS_NO_RESOURCES;
5347 goto done;
5348 }
5349
5350 INIT_LIST_HEAD(&m->patterns);
5351
5352 parse_adv_monitor_rssi(m, NULL);
5353 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5354
5355 done:
5356 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5357 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5358 }
5359
5360 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5361 void *data, u16 len)
5362 {
5363 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5364 struct adv_monitor *m = NULL;
5365 u8 status = MGMT_STATUS_SUCCESS;
5366 size_t expected_size = sizeof(*cp);
5367
5368 BT_DBG("request for %s", hdev->name);
5369
5370 if (len <= sizeof(*cp)) {
5371 status = MGMT_STATUS_INVALID_PARAMS;
5372 goto done;
5373 }
5374
5375 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5376 if (len != expected_size) {
5377 status = MGMT_STATUS_INVALID_PARAMS;
5378 goto done;
5379 }
5380
5381 m = kzalloc(sizeof(*m), GFP_KERNEL);
5382 if (!m) {
5383 status = MGMT_STATUS_NO_RESOURCES;
5384 goto done;
5385 }
5386
5387 INIT_LIST_HEAD(&m->patterns);
5388
5389 parse_adv_monitor_rssi(m, &cp->rssi);
5390 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5391
5392 done:
5393 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5394 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5395 }
5396
5397 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5398 void *data, int status)
5399 {
5400 struct mgmt_rp_remove_adv_monitor rp;
5401 struct mgmt_pending_cmd *cmd = data;
5402 struct mgmt_cp_remove_adv_monitor *cp;
5403
5404 if (status == -ECANCELED)
5405 return;
5406
5407 hci_dev_lock(hdev);
5408
5409 cp = cmd->param;
5410
5411 rp.monitor_handle = cp->monitor_handle;
5412
5413 if (!status) {
5414 mgmt_adv_monitor_removed(cmd->sk, hdev, cp->monitor_handle);
5415 hci_update_passive_scan(hdev);
5416 }
5417
5418 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
5419 mgmt_status(status), &rp, sizeof(rp));
5420 mgmt_pending_free(cmd);
5421
5422 hci_dev_unlock(hdev);
5423 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5424 rp.monitor_handle, status);
5425 }
5426
5427 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5428 {
5429 struct mgmt_pending_cmd *cmd = data;
5430 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5431 u16 handle = __le16_to_cpu(cp->monitor_handle);
5432
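	/* A monitor_handle of 0 is a wildcard requesting removal of all
	 * registered monitors.
	 */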
5433 if (!handle)
5434 return hci_remove_all_adv_monitor(hdev);
5435
5436 return hci_remove_single_adv_monitor(hdev, handle);
5437 }
5438
5439 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5440 void *data, u16 len)
5441 {
5442 struct mgmt_pending_cmd *cmd;
5443 int err, status;
5444
5445 hci_dev_lock(hdev);
5446
5447 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5448 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5449 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5450 status = MGMT_STATUS_BUSY;
5451 goto unlock;
5452 }
5453
5454 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5455 if (!cmd) {
5456 status = MGMT_STATUS_NO_RESOURCES;
5457 goto unlock;
5458 }
5459
5460 err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
5461 mgmt_remove_adv_monitor_complete);
5462
5463 if (err) {
5464 mgmt_pending_free(cmd);
5465
5466 if (err == -ENOMEM)
5467 status = MGMT_STATUS_NO_RESOURCES;
5468 else
5469 status = MGMT_STATUS_FAILED;
5470
5471 goto unlock;
5472 }
5473
5474 hci_dev_unlock(hdev);
5475
5476 return 0;
5477
5478 unlock:
5479 hci_dev_unlock(hdev);
5480 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5481 status);
5482 }
5483
5484 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5485 {
5486 struct mgmt_rp_read_local_oob_data mgmt_rp;
5487 size_t rp_size = sizeof(mgmt_rp);
5488 struct mgmt_pending_cmd *cmd = data;
5489 struct sk_buff *skb = cmd->skb;
5490 u8 status = mgmt_status(err);
5491
5492 if (!status) {
5493 if (!skb)
5494 status = MGMT_STATUS_FAILED;
5495 else if (IS_ERR(skb))
5496 status = mgmt_status(PTR_ERR(skb));
5497 else
5498 status = mgmt_status(skb->data[0]);
5499 }
5500
5501 bt_dev_dbg(hdev, "status %d", status);
5502
5503 if (status) {
5504 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5505 goto remove;
5506 }
5507
5508 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5509
5510 if (!bredr_sc_enabled(hdev)) {
5511 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5512
5513 if (skb->len < sizeof(*rp)) {
5514 mgmt_cmd_status(cmd->sk, hdev->id,
5515 MGMT_OP_READ_LOCAL_OOB_DATA,
5516 MGMT_STATUS_FAILED);
5517 goto remove;
5518 }
5519
5520 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5521 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5522
5523 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5524 } else {
5525 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5526
5527 if (skb->len < sizeof(*rp)) {
5528 mgmt_cmd_status(cmd->sk, hdev->id,
5529 MGMT_OP_READ_LOCAL_OOB_DATA,
5530 MGMT_STATUS_FAILED);
5531 goto remove;
5532 }
5533
5534 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5535 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5536
5537 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5538 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5539 }
5540
5541 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5542 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5543
5544 remove:
5545 if (skb && !IS_ERR(skb))
5546 kfree_skb(skb);
5547
5548 mgmt_pending_free(cmd);
5549 }
5550
5551 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5552 {
5553 struct mgmt_pending_cmd *cmd = data;
5554
5555 if (bredr_sc_enabled(hdev))
5556 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5557 else
5558 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5559
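	/* On success the reply skb is stored in cmd->skb and is consumed
	 * and freed by read_local_oob_data_complete().
	 */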
5560 if (IS_ERR(cmd->skb))
5561 return PTR_ERR(cmd->skb);
5562 else
5563 return 0;
5564 }
5565
5566 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5567 void *data, u16 data_len)
5568 {
5569 struct mgmt_pending_cmd *cmd;
5570 int err;
5571
5572 bt_dev_dbg(hdev, "sock %p", sk);
5573
5574 hci_dev_lock(hdev);
5575
5576 if (!hdev_is_powered(hdev)) {
5577 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5578 MGMT_STATUS_NOT_POWERED);
5579 goto unlock;
5580 }
5581
5582 if (!lmp_ssp_capable(hdev)) {
5583 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5584 MGMT_STATUS_NOT_SUPPORTED);
5585 goto unlock;
5586 }
5587
5588 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5589 if (!cmd)
5590 err = -ENOMEM;
5591 else
5592 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5593 read_local_oob_data_complete);
5594
5595 if (err < 0) {
5596 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5597 MGMT_STATUS_FAILED);
5598
5599 if (cmd)
5600 mgmt_pending_free(cmd);
5601 }
5602
5603 unlock:
5604 hci_dev_unlock(hdev);
5605 return err;
5606 }
5607
5608 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5609 void *data, u16 len)
5610 {
5611 struct mgmt_addr_info *addr = data;
5612 int err;
5613
5614 bt_dev_dbg(hdev, "sock %p", sk);
5615
5616 if (!bdaddr_type_is_valid(addr->type))
5617 return mgmt_cmd_complete(sk, hdev->id,
5618 MGMT_OP_ADD_REMOTE_OOB_DATA,
5619 MGMT_STATUS_INVALID_PARAMS,
5620 addr, sizeof(*addr));
5621
5622 hci_dev_lock(hdev);
5623
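	/* Two payload sizes are accepted: the legacy form carrying only
	 * the P-192 hash/randomizer values and the extended form carrying
	 * both P-192 and P-256 values.
	 */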
5624 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5625 struct mgmt_cp_add_remote_oob_data *cp = data;
5626 u8 status;
5627
5628 if (cp->addr.type != BDADDR_BREDR) {
5629 err = mgmt_cmd_complete(sk, hdev->id,
5630 MGMT_OP_ADD_REMOTE_OOB_DATA,
5631 MGMT_STATUS_INVALID_PARAMS,
5632 &cp->addr, sizeof(cp->addr));
5633 goto unlock;
5634 }
5635
5636 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5637 cp->addr.type, cp->hash,
5638 cp->rand, NULL, NULL);
5639 if (err < 0)
5640 status = MGMT_STATUS_FAILED;
5641 else
5642 status = MGMT_STATUS_SUCCESS;
5643
5644 err = mgmt_cmd_complete(sk, hdev->id,
5645 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5646 &cp->addr, sizeof(cp->addr));
5647 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5648 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5649 u8 *rand192, *hash192, *rand256, *hash256;
5650 u8 status;
5651
5652 if (bdaddr_type_is_le(cp->addr.type)) {
5653 /* Enforce zero-valued 192-bit parameters as
5654 * long as legacy SMP OOB isn't implemented.
5655 */
5656 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5657 memcmp(cp->hash192, ZERO_KEY, 16)) {
5658 err = mgmt_cmd_complete(sk, hdev->id,
5659 MGMT_OP_ADD_REMOTE_OOB_DATA,
5660 MGMT_STATUS_INVALID_PARAMS,
5661 addr, sizeof(*addr));
5662 goto unlock;
5663 }
5664
5665 rand192 = NULL;
5666 hash192 = NULL;
5667 } else {
5668 /* If one of the P-192 values is set to zero, just
5669 * disable OOB data for P-192.
5670 */
5671 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5672 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5673 rand192 = NULL;
5674 hash192 = NULL;
5675 } else {
5676 rand192 = cp->rand192;
5677 hash192 = cp->hash192;
5678 }
5679 }
5680
5681 /* If one of the P-256 values is set to zero, just disable
5682 * OOB data for P-256.
5683 */
5684 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5685 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5686 rand256 = NULL;
5687 hash256 = NULL;
5688 } else {
5689 rand256 = cp->rand256;
5690 hash256 = cp->hash256;
5691 }
5692
5693 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5694 cp->addr.type, hash192, rand192,
5695 hash256, rand256);
5696 if (err < 0)
5697 status = MGMT_STATUS_FAILED;
5698 else
5699 status = MGMT_STATUS_SUCCESS;
5700
5701 err = mgmt_cmd_complete(sk, hdev->id,
5702 MGMT_OP_ADD_REMOTE_OOB_DATA,
5703 status, &cp->addr, sizeof(cp->addr));
5704 } else {
5705 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5706 len);
5707 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5708 MGMT_STATUS_INVALID_PARAMS);
5709 }
5710
5711 unlock:
5712 hci_dev_unlock(hdev);
5713 return err;
5714 }
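/* The handler above dispatches purely on the command length. As a
 * reference, a sketch of the two accepted layouts, paraphrased from
 * mgmt.h:
 *
 *	// legacy form, P-192 only, BR/EDR addresses:
 *	struct mgmt_cp_add_remote_oob_data {
 *		struct mgmt_addr_info addr;
 *		__u8 hash[16];
 *		__u8 rand[16];
 *	};
 *
 *	// extended form, P-192 + P-256; for LE addresses the P-192
 *	// values must be all-zero (see the check above):
 *	struct mgmt_cp_add_remote_oob_ext_data {
 *		struct mgmt_addr_info addr;
 *		__u8 hash192[16];
 *		__u8 rand192[16];
 *		__u8 hash256[16];
 *		__u8 rand256[16];
 *	};
 */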
5715
5716 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5717 void *data, u16 len)
5718 {
5719 struct mgmt_cp_remove_remote_oob_data *cp = data;
5720 u8 status;
5721 int err;
5722
5723 bt_dev_dbg(hdev, "sock %p", sk);
5724
5725 if (cp->addr.type != BDADDR_BREDR)
5726 return mgmt_cmd_complete(sk, hdev->id,
5727 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5728 MGMT_STATUS_INVALID_PARAMS,
5729 &cp->addr, sizeof(cp->addr));
5730
5731 hci_dev_lock(hdev);
5732
5733 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5734 hci_remote_oob_data_clear(hdev);
5735 status = MGMT_STATUS_SUCCESS;
5736 goto done;
5737 }
5738
5739 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5740 if (err < 0)
5741 status = MGMT_STATUS_INVALID_PARAMS;
5742 else
5743 status = MGMT_STATUS_SUCCESS;
5744
5745 done:
5746 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5747 status, &cp->addr, sizeof(cp->addr));
5748
5749 hci_dev_unlock(hdev);
5750 return err;
5751 }
5752
5753 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5754 uint8_t *mgmt_status)
5755 {
5756 switch (type) {
5757 case DISCOV_TYPE_LE:
5758 *mgmt_status = mgmt_le_support(hdev);
5759 if (*mgmt_status)
5760 return false;
5761 break;
5762 case DISCOV_TYPE_INTERLEAVED:
5763 *mgmt_status = mgmt_le_support(hdev);
5764 if (*mgmt_status)
5765 return false;
5766 fallthrough;
5767 case DISCOV_TYPE_BREDR:
5768 *mgmt_status = mgmt_bredr_support(hdev);
5769 if (*mgmt_status)
5770 return false;
5771 break;
5772 default:
5773 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5774 return false;
5775 }
5776
5777 return true;
5778 }
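/* Typical use of the helper above: the caller passes a status out-param
 * and converts a false result straight into a mgmt reply, e.g.:
 *
 *	u8 status;
 *
 *	if (!discovery_type_is_valid(hdev, cp->type, &status))
 *		return mgmt_cmd_complete(sk, hdev->id, op, status,
 *					 &cp->type, sizeof(cp->type));
 */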
5779
5780 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5781 {
5782 struct mgmt_pending_cmd *cmd = data;
5783
5784 bt_dev_dbg(hdev, "err %d", err);
5785
5786 if (err == -ECANCELED)
5787 return;
5788
5789 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5790 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5791 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5792 return;
5793
5794 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
5795 cmd->param, 1);
5796 mgmt_pending_remove(cmd);
5797
5798 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
5799 DISCOVERY_FINDING);
5800 }
5801
5802 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5803 {
5804 return hci_start_discovery_sync(hdev);
5805 }
5806
5807 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5808 u16 op, void *data, u16 len)
5809 {
5810 struct mgmt_cp_start_discovery *cp = data;
5811 struct mgmt_pending_cmd *cmd;
5812 u8 status;
5813 int err;
5814
5815 bt_dev_dbg(hdev, "sock %p", sk);
5816
5817 hci_dev_lock(hdev);
5818
5819 if (!hdev_is_powered(hdev)) {
5820 err = mgmt_cmd_complete(sk, hdev->id, op,
5821 MGMT_STATUS_NOT_POWERED,
5822 &cp->type, sizeof(cp->type));
5823 goto failed;
5824 }
5825
5826 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5827 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5828 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5829 &cp->type, sizeof(cp->type));
5830 goto failed;
5831 }
5832
5833 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5834 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5835 &cp->type, sizeof(cp->type));
5836 goto failed;
5837 }
5838
5839 /* Can't start discovery when it is paused */
5840 if (hdev->discovery_paused) {
5841 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5842 &cp->type, sizeof(cp->type));
5843 goto failed;
5844 }
5845
5846 /* Clear the discovery filter first to free any previously
5847 * allocated memory for the UUID list.
5848 */
5849 hci_discovery_filter_clear(hdev);
5850
5851 hdev->discovery.type = cp->type;
5852 hdev->discovery.report_invalid_rssi = false;
5853 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5854 hdev->discovery.limited = true;
5855 else
5856 hdev->discovery.limited = false;
5857
5858 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5859 if (!cmd) {
5860 err = -ENOMEM;
5861 goto failed;
5862 }
5863
5864 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5865 start_discovery_complete);
5866 if (err < 0) {
5867 mgmt_pending_remove(cmd);
5868 goto failed;
5869 }
5870
5871 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5872
5873 failed:
5874 hci_dev_unlock(hdev);
5875 return err;
5876 }
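/* The function above shows the common shape of the asynchronous mgmt
 * handlers in this file: allocate a pending command, queue the actual
 * HCI work via hci_cmd_sync_queue() and reply from the completion
 * callback. A skeleton of the pattern:
 *
 *	cmd = mgmt_pending_add(sk, op, hdev, data, len);   // track request
 *	err = hci_cmd_sync_queue(hdev, do_work_sync, cmd,  // run deferred
 *				 on_complete);             // reply from here
 *	if (err < 0)
 *		mgmt_pending_remove(cmd);                  // undo on failure
 */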
5877
5878 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5879 void *data, u16 len)
5880 {
5881 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5882 data, len);
5883 }
5884
5885 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5886 void *data, u16 len)
5887 {
5888 return start_discovery_internal(sk, hdev,
5889 MGMT_OP_START_LIMITED_DISCOVERY,
5890 data, len);
5891 }
5892
5893 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5894 void *data, u16 len)
5895 {
5896 struct mgmt_cp_start_service_discovery *cp = data;
5897 struct mgmt_pending_cmd *cmd;
5898 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5899 u16 uuid_count, expected_len;
5900 u8 status;
5901 int err;
5902
5903 bt_dev_dbg(hdev, "sock %p", sk);
5904
5905 hci_dev_lock(hdev);
5906
5907 if (!hdev_is_powered(hdev)) {
5908 err = mgmt_cmd_complete(sk, hdev->id,
5909 MGMT_OP_START_SERVICE_DISCOVERY,
5910 MGMT_STATUS_NOT_POWERED,
5911 &cp->type, sizeof(cp->type));
5912 goto failed;
5913 }
5914
5915 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5916 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5917 err = mgmt_cmd_complete(sk, hdev->id,
5918 MGMT_OP_START_SERVICE_DISCOVERY,
5919 MGMT_STATUS_BUSY, &cp->type,
5920 sizeof(cp->type));
5921 goto failed;
5922 }
5923
5924 if (hdev->discovery_paused) {
5925 err = mgmt_cmd_complete(sk, hdev->id,
5926 MGMT_OP_START_SERVICE_DISCOVERY,
5927 MGMT_STATUS_BUSY, &cp->type,
5928 sizeof(cp->type));
5929 goto failed;
5930 }
5931
5932 uuid_count = __le16_to_cpu(cp->uuid_count);
5933 if (uuid_count > max_uuid_count) {
5934 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5935 uuid_count);
5936 err = mgmt_cmd_complete(sk, hdev->id,
5937 MGMT_OP_START_SERVICE_DISCOVERY,
5938 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5939 sizeof(cp->type));
5940 goto failed;
5941 }
5942
5943 expected_len = sizeof(*cp) + uuid_count * 16;
5944 if (expected_len != len) {
5945 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5946 expected_len, len);
5947 err = mgmt_cmd_complete(sk, hdev->id,
5948 MGMT_OP_START_SERVICE_DISCOVERY,
5949 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5950 sizeof(cp->type));
5951 goto failed;
5952 }
5953
5954 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5955 err = mgmt_cmd_complete(sk, hdev->id,
5956 MGMT_OP_START_SERVICE_DISCOVERY,
5957 status, &cp->type, sizeof(cp->type));
5958 goto failed;
5959 }
5960
5961 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
5962 hdev, data, len);
5963 if (!cmd) {
5964 err = -ENOMEM;
5965 goto failed;
5966 }
5967
5968 /* Clear the discovery filter first to free any previously
5969 * allocated memory for the UUID list.
5970 */
5971 hci_discovery_filter_clear(hdev);
5972
5973 hdev->discovery.result_filtering = true;
5974 hdev->discovery.type = cp->type;
5975 hdev->discovery.rssi = cp->rssi;
5976 hdev->discovery.uuid_count = uuid_count;
5977
5978 if (uuid_count > 0) {
5979 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
5980 GFP_KERNEL);
5981 if (!hdev->discovery.uuids) {
5982 err = mgmt_cmd_complete(sk, hdev->id,
5983 MGMT_OP_START_SERVICE_DISCOVERY,
5984 MGMT_STATUS_FAILED,
5985 &cp->type, sizeof(cp->type));
5986 mgmt_pending_remove(cmd);
5987 goto failed;
5988 }
5989 }
5990
5991 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5992 start_discovery_complete);
5993 if (err < 0) {
5994 mgmt_pending_remove(cmd);
5995 goto failed;
5996 }
5997
5998 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5999
6000 failed:
6001 hci_dev_unlock(hdev);
6002 return err;
6003 }
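/* Worked example for the length check above: each UUID entry is 16 bytes
 * (a full 128-bit UUID), so a request filtering on two UUIDs must be
 * exactly sizeof(struct mgmt_cp_start_service_discovery) + 2 * 16 bytes;
 * any other length is rejected with MGMT_STATUS_INVALID_PARAMS.
 */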
6004
6005 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6006 {
6007 struct mgmt_pending_cmd *cmd = data;
6008
6009 if (err == -ECANCELED ||
6010 cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6011 return;
6012
6013 bt_dev_dbg(hdev, "err %d", err);
6014
6015 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
6016 cmd->param, 1);
6017 mgmt_pending_remove(cmd);
6018
6019 if (!err)
6020 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6021 }
6022
6023 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6024 {
6025 return hci_stop_discovery_sync(hdev);
6026 }
6027
6028 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6029 u16 len)
6030 {
6031 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6032 struct mgmt_pending_cmd *cmd;
6033 int err;
6034
6035 bt_dev_dbg(hdev, "sock %p", sk);
6036
6037 hci_dev_lock(hdev);
6038
6039 if (!hci_discovery_active(hdev)) {
6040 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6041 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6042 sizeof(mgmt_cp->type));
6043 goto unlock;
6044 }
6045
6046 if (hdev->discovery.type != mgmt_cp->type) {
6047 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6048 MGMT_STATUS_INVALID_PARAMS,
6049 &mgmt_cp->type, sizeof(mgmt_cp->type));
6050 goto unlock;
6051 }
6052
6053 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6054 if (!cmd) {
6055 err = -ENOMEM;
6056 goto unlock;
6057 }
6058
6059 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6060 stop_discovery_complete);
6061 if (err < 0) {
6062 mgmt_pending_remove(cmd);
6063 goto unlock;
6064 }
6065
6066 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6067
6068 unlock:
6069 hci_dev_unlock(hdev);
6070 return err;
6071 }
6072
6073 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6074 u16 len)
6075 {
6076 struct mgmt_cp_confirm_name *cp = data;
6077 struct inquiry_entry *e;
6078 int err;
6079
6080 bt_dev_dbg(hdev, "sock %p", sk);
6081
6082 hci_dev_lock(hdev);
6083
6084 if (!hci_discovery_active(hdev)) {
6085 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6086 MGMT_STATUS_FAILED, &cp->addr,
6087 sizeof(cp->addr));
6088 goto failed;
6089 }
6090
6091 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6092 if (!e) {
6093 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6094 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6095 sizeof(cp->addr));
6096 goto failed;
6097 }
6098
6099 if (cp->name_known) {
6100 e->name_state = NAME_KNOWN;
6101 list_del(&e->list);
6102 } else {
6103 e->name_state = NAME_NEEDED;
6104 hci_inquiry_cache_update_resolve(hdev, e);
6105 }
6106
6107 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6108 &cp->addr, sizeof(cp->addr));
6109
6110 failed:
6111 hci_dev_unlock(hdev);
6112 return err;
6113 }
6114
6115 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6116 u16 len)
6117 {
6118 struct mgmt_cp_block_device *cp = data;
6119 u8 status;
6120 int err;
6121
6122 bt_dev_dbg(hdev, "sock %p", sk);
6123
6124 if (!bdaddr_type_is_valid(cp->addr.type))
6125 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6126 MGMT_STATUS_INVALID_PARAMS,
6127 &cp->addr, sizeof(cp->addr));
6128
6129 hci_dev_lock(hdev);
6130
6131 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6132 cp->addr.type);
6133 if (err < 0) {
6134 status = MGMT_STATUS_FAILED;
6135 goto done;
6136 }
6137
6138 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6139 sk);
6140 status = MGMT_STATUS_SUCCESS;
6141
6142 done:
6143 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6144 &cp->addr, sizeof(cp->addr));
6145
6146 hci_dev_unlock(hdev);
6147
6148 return err;
6149 }
6150
6151 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6152 u16 len)
6153 {
6154 struct mgmt_cp_unblock_device *cp = data;
6155 u8 status;
6156 int err;
6157
6158 bt_dev_dbg(hdev, "sock %p", sk);
6159
6160 if (!bdaddr_type_is_valid(cp->addr.type))
6161 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6162 MGMT_STATUS_INVALID_PARAMS,
6163 &cp->addr, sizeof(cp->addr));
6164
6165 hci_dev_lock(hdev);
6166
6167 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6168 cp->addr.type);
6169 if (err < 0) {
6170 status = MGMT_STATUS_INVALID_PARAMS;
6171 goto done;
6172 }
6173
6174 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6175 sk);
6176 status = MGMT_STATUS_SUCCESS;
6177
6178 done:
6179 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6180 &cp->addr, sizeof(cp->addr));
6181
6182 hci_dev_unlock(hdev);
6183
6184 return err;
6185 }
6186
6187 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6188 {
6189 return hci_update_eir_sync(hdev);
6190 }
6191
6192 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6193 u16 len)
6194 {
6195 struct mgmt_cp_set_device_id *cp = data;
6196 int err;
6197 __u16 source;
6198
6199 bt_dev_dbg(hdev, "sock %p", sk);
6200
6201 source = __le16_to_cpu(cp->source);
6202
6203 if (source > 0x0002)
6204 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6205 MGMT_STATUS_INVALID_PARAMS);
6206
6207 hci_dev_lock(hdev);
6208
6209 hdev->devid_source = source;
6210 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6211 hdev->devid_product = __le16_to_cpu(cp->product);
6212 hdev->devid_version = __le16_to_cpu(cp->version);
6213
6214 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6215 NULL, 0);
6216
6217 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6218
6219 hci_dev_unlock(hdev);
6220
6221 return err;
6222 }
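/* The source values accepted above follow the Device ID profile: 0x0000
 * disables the Device ID, 0x0001 selects a Bluetooth SIG assigned vendor
 * ID namespace and 0x0002 a USB Implementer's Forum assigned one. A
 * sketch of a valid request (the vendor/product/version values here are
 * purely illustrative):
 *
 *	struct mgmt_cp_set_device_id cp = {
 *		.source  = cpu_to_le16(0x0002),	// USB IF namespace
 *		.vendor  = cpu_to_le16(0x1d6b),	// example vendor ID
 *		.product = cpu_to_le16(0x0246),	// example product ID
 *		.version = cpu_to_le16(0x0100),
 *	};
 */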
6223
6224 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6225 {
6226 if (err)
6227 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6228 else
6229 bt_dev_dbg(hdev, "status %d", err);
6230 }
6231
6232 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6233 {
6234 struct cmd_lookup match = { NULL, hdev };
6235 u8 instance;
6236 struct adv_info *adv_instance;
6237 u8 status = mgmt_status(err);
6238
6239 if (status) {
6240 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, true,
6241 cmd_status_rsp, &status);
6242 return;
6243 }
6244
6245 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6246 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6247 else
6248 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6249
6250 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, true, settings_rsp,
6251 &match);
6252
6253 new_settings(hdev, match.sk);
6254
6255 if (match.sk)
6256 sock_put(match.sk);
6257
6258 /* If "Set Advertising" was just disabled and instance advertising was
6259 * set up earlier, then re-enable multi-instance advertising.
6260 */
6261 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6262 list_empty(&hdev->adv_instances))
6263 return;
6264
6265 instance = hdev->cur_adv_instance;
6266 if (!instance) {
6267 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6268 struct adv_info, list);
6269 if (!adv_instance)
6270 return;
6271
6272 instance = adv_instance->instance;
6273 }
6274
6275 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6276
6277 enable_advertising_instance(hdev, err);
6278 }
6279
6280 static int set_adv_sync(struct hci_dev *hdev, void *data)
6281 {
6282 struct mgmt_pending_cmd *cmd = data;
6283 struct mgmt_mode *cp = cmd->param;
6284 u8 val = !!cp->val;
6285
6286 if (cp->val == 0x02)
6287 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6288 else
6289 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6290
6291 cancel_adv_timeout(hdev);
6292
6293 if (val) {
6294 /* Switch to instance "0" for the Set Advertising setting.
6295 * We cannot use update_[adv|scan_rsp]_data() here as the
6296 * HCI_ADVERTISING flag is not yet set.
6297 */
6298 hdev->cur_adv_instance = 0x00;
6299
6300 if (ext_adv_capable(hdev)) {
6301 hci_start_ext_adv_sync(hdev, 0x00);
6302 } else {
6303 hci_update_adv_data_sync(hdev, 0x00);
6304 hci_update_scan_rsp_data_sync(hdev, 0x00);
6305 hci_enable_advertising_sync(hdev);
6306 }
6307 } else {
6308 hci_disable_advertising_sync(hdev);
6309 }
6310
6311 return 0;
6312 }
6313
6314 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6315 u16 len)
6316 {
6317 struct mgmt_mode *cp = data;
6318 struct mgmt_pending_cmd *cmd;
6319 u8 val, status;
6320 int err;
6321
6322 bt_dev_dbg(hdev, "sock %p", sk);
6323
6324 status = mgmt_le_support(hdev);
6325 if (status)
6326 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6327 status);
6328
6329 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6330 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6331 MGMT_STATUS_INVALID_PARAMS);
6332
6333 if (hdev->advertising_paused)
6334 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6335 MGMT_STATUS_BUSY);
6336
6337 hci_dev_lock(hdev);
6338
6339 val = !!cp->val;
6340
6341 /* The following conditions mean that we should not do any
6342 * HCI communication but instead send a mgmt response
6343 * directly to user space (after toggling the flag if
6344 * necessary).
6345 */
6346 if (!hdev_is_powered(hdev) ||
6347 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6348 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6349 hci_dev_test_flag(hdev, HCI_MESH) ||
6350 hci_conn_num(hdev, LE_LINK) > 0 ||
6351 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6352 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6353 bool changed;
6354
6355 if (cp->val) {
6356 hdev->cur_adv_instance = 0x00;
6357 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6358 if (cp->val == 0x02)
6359 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6360 else
6361 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6362 } else {
6363 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6364 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6365 }
6366
6367 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6368 if (err < 0)
6369 goto unlock;
6370
6371 if (changed)
6372 err = new_settings(hdev, sk);
6373
6374 goto unlock;
6375 }
6376
6377 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6378 pending_find(MGMT_OP_SET_LE, hdev)) {
6379 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6380 MGMT_STATUS_BUSY);
6381 goto unlock;
6382 }
6383
6384 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6385 if (!cmd)
6386 err = -ENOMEM;
6387 else
6388 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6389 set_advertising_complete);
6390
6391 if (err < 0 && cmd)
6392 mgmt_pending_remove(cmd);
6393
6394 unlock:
6395 hci_dev_unlock(hdev);
6396 return err;
6397 }
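/* The val semantics handled above: 0x00 disables advertising, 0x01
 * enables it, and 0x02 enables it in connectable mode even when the
 * Connectable setting itself is off, which is why 0x02 additionally
 * toggles HCI_ADVERTISING_CONNECTABLE in the paths above.
 */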
6398
6399 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6400 void *data, u16 len)
6401 {
6402 struct mgmt_cp_set_static_address *cp = data;
6403 int err;
6404
6405 bt_dev_dbg(hdev, "sock %p", sk);
6406
6407 if (!lmp_le_capable(hdev))
6408 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6409 MGMT_STATUS_NOT_SUPPORTED);
6410
6411 if (hdev_is_powered(hdev))
6412 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6413 MGMT_STATUS_REJECTED);
6414
6415 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6416 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6417 return mgmt_cmd_status(sk, hdev->id,
6418 MGMT_OP_SET_STATIC_ADDRESS,
6419 MGMT_STATUS_INVALID_PARAMS);
6420
6421 /* Two most significant bits shall be set */
6422 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6423 return mgmt_cmd_status(sk, hdev->id,
6424 MGMT_OP_SET_STATIC_ADDRESS,
6425 MGMT_STATUS_INVALID_PARAMS);
6426 }
6427
6428 hci_dev_lock(hdev);
6429
6430 bacpy(&hdev->static_addr, &cp->bdaddr);
6431
6432 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6433 if (err < 0)
6434 goto unlock;
6435
6436 err = new_settings(hdev, sk);
6437
6438 unlock:
6439 hci_dev_unlock(hdev);
6440 return err;
6441 }
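/* Sketch of the address rule enforced above: a static random address
 * must have its two most significant bits set, so the first octet is
 * at least 0xC0 when written MSB first, e.g.:
 *
 *	C0:55:44:33:22:11	valid static address
 *	00:55:44:33:22:11	rejected (top bits clear)
 *
 * bdaddr_t stores the address little-endian, hence the b[5] check.
 */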
6442
6443 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6444 void *data, u16 len)
6445 {
6446 struct mgmt_cp_set_scan_params *cp = data;
6447 __u16 interval, window;
6448 int err;
6449
6450 bt_dev_dbg(hdev, "sock %p", sk);
6451
6452 if (!lmp_le_capable(hdev))
6453 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6454 MGMT_STATUS_NOT_SUPPORTED);
6455
6456 /* Keep allowed ranges in sync with set_mesh() */
6457 interval = __le16_to_cpu(cp->interval);
6458
6459 if (interval < 0x0004 || interval > 0x4000)
6460 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6461 MGMT_STATUS_INVALID_PARAMS);
6462
6463 window = __le16_to_cpu(cp->window);
6464
6465 if (window < 0x0004 || window > 0x4000)
6466 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6467 MGMT_STATUS_INVALID_PARAMS);
6468
6469 if (window > interval)
6470 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6471 MGMT_STATUS_INVALID_PARAMS);
6472
6473 hci_dev_lock(hdev);
6474
6475 hdev->le_scan_interval = interval;
6476 hdev->le_scan_window = window;
6477
6478 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6479 NULL, 0);
6480
6481 /* If background scan is running, restart it so new parameters are
6482 * loaded.
6483 */
6484 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6485 hdev->discovery.state == DISCOVERY_STOPPED)
6486 hci_update_passive_scan(hdev);
6487
6488 hci_dev_unlock(hdev);
6489
6490 return err;
6491 }
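/* The interval and window above are in units of 0.625 ms, so the
 * accepted range 0x0004-0x4000 corresponds to 2.5 ms - 10.24 s. For
 * example, a window of 0x0012 (11.25 ms) inside an interval of 0x0010
 * (10 ms) would be rejected because the window may never exceed the
 * interval.
 */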
6492
6493 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6494 {
6495 struct mgmt_pending_cmd *cmd = data;
6496
6497 bt_dev_dbg(hdev, "err %d", err);
6498
6499 if (err) {
6500 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6501 mgmt_status(err));
6502 } else {
6503 struct mgmt_mode *cp = cmd->param;
6504
6505 if (cp->val)
6506 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6507 else
6508 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6509
6510 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6511 new_settings(hdev, cmd->sk);
6512 }
6513
6514 mgmt_pending_free(cmd);
6515 }
6516
6517 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6518 {
6519 struct mgmt_pending_cmd *cmd = data;
6520 struct mgmt_mode *cp = cmd->param;
6521
6522 return hci_write_fast_connectable_sync(hdev, cp->val);
6523 }
6524
6525 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6526 void *data, u16 len)
6527 {
6528 struct mgmt_mode *cp = data;
6529 struct mgmt_pending_cmd *cmd;
6530 int err;
6531
6532 bt_dev_dbg(hdev, "sock %p", sk);
6533
6534 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6535 hdev->hci_ver < BLUETOOTH_VER_1_2)
6536 return mgmt_cmd_status(sk, hdev->id,
6537 MGMT_OP_SET_FAST_CONNECTABLE,
6538 MGMT_STATUS_NOT_SUPPORTED);
6539
6540 if (cp->val != 0x00 && cp->val != 0x01)
6541 return mgmt_cmd_status(sk, hdev->id,
6542 MGMT_OP_SET_FAST_CONNECTABLE,
6543 MGMT_STATUS_INVALID_PARAMS);
6544
6545 hci_dev_lock(hdev);
6546
6547 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6548 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6549 goto unlock;
6550 }
6551
6552 if (!hdev_is_powered(hdev)) {
6553 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6554 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6555 new_settings(hdev, sk);
6556 goto unlock;
6557 }
6558
6559 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6560 len);
6561 if (!cmd)
6562 err = -ENOMEM;
6563 else
6564 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6565 fast_connectable_complete);
6566
6567 if (err < 0) {
6568 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6569 MGMT_STATUS_FAILED);
6570
6571 if (cmd)
6572 mgmt_pending_free(cmd);
6573 }
6574
6575 unlock:
6576 hci_dev_unlock(hdev);
6577
6578 return err;
6579 }
6580
6581 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6582 {
6583 struct mgmt_pending_cmd *cmd = data;
6584
6585 bt_dev_dbg(hdev, "err %d", err);
6586
6587 if (err) {
6588 u8 mgmt_err = mgmt_status(err);
6589
6590 /* We need to restore the flag if related HCI commands
6591 * failed.
6592 */
6593 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6594
6595 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
6596 } else {
6597 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6598 new_settings(hdev, cmd->sk);
6599 }
6600
6601 mgmt_pending_free(cmd);
6602 }
6603
6604 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6605 {
6606 int status;
6607
6608 status = hci_write_fast_connectable_sync(hdev, false);
6609
6610 if (!status)
6611 status = hci_update_scan_sync(hdev);
6612
6613 /* Since only the advertising data flags will change, there
6614 * is no need to update the scan response data.
6615 */
6616 if (!status)
6617 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6618
6619 return status;
6620 }
6621
6622 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6623 {
6624 struct mgmt_mode *cp = data;
6625 struct mgmt_pending_cmd *cmd;
6626 int err;
6627
6628 bt_dev_dbg(hdev, "sock %p", sk);
6629
6630 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6631 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6632 MGMT_STATUS_NOT_SUPPORTED);
6633
6634 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6635 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6636 MGMT_STATUS_REJECTED);
6637
6638 if (cp->val != 0x00 && cp->val != 0x01)
6639 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6640 MGMT_STATUS_INVALID_PARAMS);
6641
6642 hci_dev_lock(hdev);
6643
6644 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6645 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6646 goto unlock;
6647 }
6648
6649 if (!hdev_is_powered(hdev)) {
6650 if (!cp->val) {
6651 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6652 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6653 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6654 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6655 }
6656
6657 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6658
6659 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6660 if (err < 0)
6661 goto unlock;
6662
6663 err = new_settings(hdev, sk);
6664 goto unlock;
6665 }
6666
6667 /* Reject disabling when powered on */
6668 if (!cp->val) {
6669 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6670 MGMT_STATUS_REJECTED);
6671 goto unlock;
6672 } else {
6673 /* When a dual-mode controller is configured to operate
6674 * with LE only and uses a static address, switching
6675 * BR/EDR back on is not allowed.
6676 *
6677 * Dual-mode controllers shall operate with the public
6678 * address as their identity address for BR/EDR and LE. So
6679 * reject the attempt to create an invalid configuration.
6680 *
6681 * The same restriction applies when Secure Connections
6682 * has been enabled. For BR/EDR this is a controller feature
6683 * while for LE it is a host stack feature. This means that
6684 * switching BR/EDR back on when Secure Connections has been
6685 * enabled is not a supported transaction.
6686 */
6687 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6688 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6689 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6690 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6691 MGMT_STATUS_REJECTED);
6692 goto unlock;
6693 }
6694 }
6695
6696 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6697 if (!cmd)
6698 err = -ENOMEM;
6699 else
6700 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6701 set_bredr_complete);
6702
6703 if (err < 0) {
6704 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6705 MGMT_STATUS_FAILED);
6706 if (cmd)
6707 mgmt_pending_free(cmd);
6708
6709 goto unlock;
6710 }
6711
6712 /* We need to flip the bit here already so that
6713 * hci_req_update_adv_data generates the correct flags.
6714 */
6715 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6716
6717 unlock:
6718 hci_dev_unlock(hdev);
6719 return err;
6720 }
6721
6722 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6723 {
6724 struct mgmt_pending_cmd *cmd = data;
6725 struct mgmt_mode *cp;
6726
6727 bt_dev_dbg(hdev, "err %d", err);
6728
6729 if (err) {
6730 u8 mgmt_err = mgmt_status(err);
6731
6732 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
6733 goto done;
6734 }
6735
6736 cp = cmd->param;
6737
6738 switch (cp->val) {
6739 case 0x00:
6740 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6741 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6742 break;
6743 case 0x01:
6744 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6745 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6746 break;
6747 case 0x02:
6748 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6749 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6750 break;
6751 }
6752
6753 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6754 new_settings(hdev, cmd->sk);
6755
6756 done:
6757 mgmt_pending_free(cmd);
6758 }
6759
6760 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6761 {
6762 struct mgmt_pending_cmd *cmd = data;
6763 struct mgmt_mode *cp = cmd->param;
6764 u8 val = !!cp->val;
6765
6766 /* Force write of val */
6767 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6768
6769 return hci_write_sc_support_sync(hdev, val);
6770 }
6771
6772 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6773 void *data, u16 len)
6774 {
6775 struct mgmt_mode *cp = data;
6776 struct mgmt_pending_cmd *cmd;
6777 u8 val;
6778 int err;
6779
6780 bt_dev_dbg(hdev, "sock %p", sk);
6781
6782 if (!lmp_sc_capable(hdev) &&
6783 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6784 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6785 MGMT_STATUS_NOT_SUPPORTED);
6786
6787 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6788 lmp_sc_capable(hdev) &&
6789 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6790 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6791 MGMT_STATUS_REJECTED);
6792
6793 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6794 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6795 MGMT_STATUS_INVALID_PARAMS);
6796
6797 hci_dev_lock(hdev);
6798
6799 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6800 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6801 bool changed;
6802
6803 if (cp->val) {
6804 changed = !hci_dev_test_and_set_flag(hdev,
6805 HCI_SC_ENABLED);
6806 if (cp->val == 0x02)
6807 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6808 else
6809 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6810 } else {
6811 changed = hci_dev_test_and_clear_flag(hdev,
6812 HCI_SC_ENABLED);
6813 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6814 }
6815
6816 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6817 if (err < 0)
6818 goto failed;
6819
6820 if (changed)
6821 err = new_settings(hdev, sk);
6822
6823 goto failed;
6824 }
6825
6826 val = !!cp->val;
6827
6828 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6829 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6830 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6831 goto failed;
6832 }
6833
6834 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6835 if (!cmd)
6836 err = -ENOMEM;
6837 else
6838 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6839 set_secure_conn_complete);
6840
6841 if (err < 0) {
6842 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6843 MGMT_STATUS_FAILED);
6844 if (cmd)
6845 mgmt_pending_free(cmd);
6846 }
6847
6848 failed:
6849 hci_dev_unlock(hdev);
6850 return err;
6851 }
6852
6853 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6854 void *data, u16 len)
6855 {
6856 struct mgmt_mode *cp = data;
6857 bool changed, use_changed;
6858 int err;
6859
6860 bt_dev_dbg(hdev, "sock %p", sk);
6861
6862 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6863 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6864 MGMT_STATUS_INVALID_PARAMS);
6865
6866 hci_dev_lock(hdev);
6867
6868 if (cp->val)
6869 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6870 else
6871 changed = hci_dev_test_and_clear_flag(hdev,
6872 HCI_KEEP_DEBUG_KEYS);
6873
6874 if (cp->val == 0x02)
6875 use_changed = !hci_dev_test_and_set_flag(hdev,
6876 HCI_USE_DEBUG_KEYS);
6877 else
6878 use_changed = hci_dev_test_and_clear_flag(hdev,
6879 HCI_USE_DEBUG_KEYS);
6880
6881 if (hdev_is_powered(hdev) && use_changed &&
6882 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6883 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6884 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6885 sizeof(mode), &mode);
6886 }
6887
6888 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6889 if (err < 0)
6890 goto unlock;
6891
6892 if (changed)
6893 err = new_settings(hdev, sk);
6894
6895 unlock:
6896 hci_dev_unlock(hdev);
6897 return err;
6898 }
6899
6900 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6901 u16 len)
6902 {
6903 struct mgmt_cp_set_privacy *cp = cp_data;
6904 bool changed;
6905 int err;
6906
6907 bt_dev_dbg(hdev, "sock %p", sk);
6908
6909 if (!lmp_le_capable(hdev))
6910 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6911 MGMT_STATUS_NOT_SUPPORTED);
6912
6913 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6914 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6915 MGMT_STATUS_INVALID_PARAMS);
6916
6917 if (hdev_is_powered(hdev))
6918 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6919 MGMT_STATUS_REJECTED);
6920
6921 hci_dev_lock(hdev);
6922
6923 /* If user space supports this command it is also expected to
6924 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6925 */
6926 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6927
6928 if (cp->privacy) {
6929 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6930 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
6931 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6932 hci_adv_instances_set_rpa_expired(hdev, true);
6933 if (cp->privacy == 0x02)
6934 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6935 else
6936 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6937 } else {
6938 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6939 memset(hdev->irk, 0, sizeof(hdev->irk));
6940 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6941 hci_adv_instances_set_rpa_expired(hdev, false);
6942 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6943 }
6944
6945 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
6946 if (err < 0)
6947 goto unlock;
6948
6949 if (changed)
6950 err = new_settings(hdev, sk);
6951
6952 unlock:
6953 hci_dev_unlock(hdev);
6954 return err;
6955 }
6956
6957 static bool irk_is_valid(struct mgmt_irk_info *irk)
6958 {
6959 switch (irk->addr.type) {
6960 case BDADDR_LE_PUBLIC:
6961 return true;
6962
6963 case BDADDR_LE_RANDOM:
6964 /* Two most significant bits shall be set */
6965 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6966 return false;
6967 return true;
6968 }
6969
6970 return false;
6971 }
6972
6973 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6974 u16 len)
6975 {
6976 struct mgmt_cp_load_irks *cp = cp_data;
6977 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
6978 sizeof(struct mgmt_irk_info));
6979 u16 irk_count, expected_len;
6980 int i, err;
6981
6982 bt_dev_dbg(hdev, "sock %p", sk);
6983
6984 if (!lmp_le_capable(hdev))
6985 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6986 MGMT_STATUS_NOT_SUPPORTED);
6987
6988 irk_count = __le16_to_cpu(cp->irk_count);
6989 if (irk_count > max_irk_count) {
6990 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
6991 irk_count);
6992 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6993 MGMT_STATUS_INVALID_PARAMS);
6994 }
6995
6996 expected_len = struct_size(cp, irks, irk_count);
6997 if (expected_len != len) {
6998 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
6999 expected_len, len);
7000 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7001 MGMT_STATUS_INVALID_PARAMS);
7002 }
7003
7004 bt_dev_dbg(hdev, "irk_count %u", irk_count);
7005
7006 for (i = 0; i < irk_count; i++) {
7007 struct mgmt_irk_info *key = &cp->irks[i];
7008
7009 if (!irk_is_valid(key))
7010 return mgmt_cmd_status(sk, hdev->id,
7011 MGMT_OP_LOAD_IRKS,
7012 MGMT_STATUS_INVALID_PARAMS);
7013 }
7014
7015 hci_dev_lock(hdev);
7016
7017 hci_smp_irks_clear(hdev);
7018
7019 for (i = 0; i < irk_count; i++) {
7020 struct mgmt_irk_info *irk = &cp->irks[i];
7021
7022 if (hci_is_blocked_key(hdev,
7023 HCI_BLOCKED_KEY_TYPE_IRK,
7024 irk->val)) {
7025 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7026 &irk->addr.bdaddr);
7027 continue;
7028 }
7029
7030 hci_add_irk(hdev, &irk->addr.bdaddr,
7031 le_addr_type(irk->addr.type), irk->val,
7032 BDADDR_ANY);
7033 }
7034
7035 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7036
7037 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7038
7039 hci_dev_unlock(hdev);
7040
7041 return err;
7042 }
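/* Example of the length math above: struct_size(cp, irks, irk_count)
 * expands to sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info)
 * with overflow checking, so loading e.g. three IRKs requires a payload
 * of exactly the fixed header plus three entries, each entry being an
 * address info plus a 16-byte key value.
 */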
7043
7044 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7045 {
7046 if (key->initiator != 0x00 && key->initiator != 0x01)
7047 return false;
7048
7049 switch (key->addr.type) {
7050 case BDADDR_LE_PUBLIC:
7051 return true;
7052
7053 case BDADDR_LE_RANDOM:
7054 /* Two most significant bits shall be set */
7055 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7056 return false;
7057 return true;
7058 }
7059
7060 return false;
7061 }
7062
7063 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7064 void *cp_data, u16 len)
7065 {
7066 struct mgmt_cp_load_long_term_keys *cp = cp_data;
7067 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7068 sizeof(struct mgmt_ltk_info));
7069 u16 key_count, expected_len;
7070 int i, err;
7071
7072 bt_dev_dbg(hdev, "sock %p", sk);
7073
7074 if (!lmp_le_capable(hdev))
7075 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7076 MGMT_STATUS_NOT_SUPPORTED);
7077
7078 key_count = __le16_to_cpu(cp->key_count);
7079 if (key_count > max_key_count) {
7080 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7081 key_count);
7082 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7083 MGMT_STATUS_INVALID_PARAMS);
7084 }
7085
7086 expected_len = struct_size(cp, keys, key_count);
7087 if (expected_len != len) {
7088 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7089 expected_len, len);
7090 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7091 MGMT_STATUS_INVALID_PARAMS);
7092 }
7093
7094 bt_dev_dbg(hdev, "key_count %u", key_count);
7095
7096 hci_dev_lock(hdev);
7097
7098 hci_smp_ltks_clear(hdev);
7099
7100 for (i = 0; i < key_count; i++) {
7101 struct mgmt_ltk_info *key = &cp->keys[i];
7102 u8 type, authenticated;
7103
7104 if (hci_is_blocked_key(hdev,
7105 HCI_BLOCKED_KEY_TYPE_LTK,
7106 key->val)) {
7107 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7108 &key->addr.bdaddr);
7109 continue;
7110 }
7111
7112 if (!ltk_is_valid(key)) {
7113 bt_dev_warn(hdev, "Invalid LTK for %pMR",
7114 &key->addr.bdaddr);
7115 continue;
7116 }
7117
7118 switch (key->type) {
7119 case MGMT_LTK_UNAUTHENTICATED:
7120 authenticated = 0x00;
7121 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7122 break;
7123 case MGMT_LTK_AUTHENTICATED:
7124 authenticated = 0x01;
7125 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7126 break;
7127 case MGMT_LTK_P256_UNAUTH:
7128 authenticated = 0x00;
7129 type = SMP_LTK_P256;
7130 break;
7131 case MGMT_LTK_P256_AUTH:
7132 authenticated = 0x01;
7133 type = SMP_LTK_P256;
7134 break;
7135 case MGMT_LTK_P256_DEBUG:
7136 authenticated = 0x00;
7137 type = SMP_LTK_P256_DEBUG;
7138 fallthrough;
7139 default:
7140 continue;
7141 }
7142
7143 hci_add_ltk(hdev, &key->addr.bdaddr,
7144 le_addr_type(key->addr.type), type, authenticated,
7145 key->val, key->enc_size, key->ediv, key->rand);
7146 }
7147
7148 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7149 NULL, 0);
7150
7151 hci_dev_unlock(hdev);
7152
7153 return err;
7154 }
7155
7156 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7157 {
7158 struct mgmt_pending_cmd *cmd = data;
7159 struct hci_conn *conn = cmd->user_data;
7160 struct mgmt_cp_get_conn_info *cp = cmd->param;
7161 struct mgmt_rp_get_conn_info rp;
7162 u8 status;
7163
7164 bt_dev_dbg(hdev, "err %d", err);
7165
7166 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7167
7168 status = mgmt_status(err);
7169 if (status == MGMT_STATUS_SUCCESS) {
7170 rp.rssi = conn->rssi;
7171 rp.tx_power = conn->tx_power;
7172 rp.max_tx_power = conn->max_tx_power;
7173 } else {
7174 rp.rssi = HCI_RSSI_INVALID;
7175 rp.tx_power = HCI_TX_POWER_INVALID;
7176 rp.max_tx_power = HCI_TX_POWER_INVALID;
7177 }
7178
7179 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_GET_CONN_INFO, status,
7180 &rp, sizeof(rp));
7181
7182 mgmt_pending_free(cmd);
7183 }
7184
7185 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7186 {
7187 struct mgmt_pending_cmd *cmd = data;
7188 struct mgmt_cp_get_conn_info *cp = cmd->param;
7189 struct hci_conn *conn;
7190 int err;
7191 __le16 handle;
7192
7193 /* Make sure we are still connected */
7194 if (cp->addr.type == BDADDR_BREDR)
7195 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7196 &cp->addr.bdaddr);
7197 else
7198 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7199
7200 if (!conn || conn->state != BT_CONNECTED)
7201 return MGMT_STATUS_NOT_CONNECTED;
7202
7203 cmd->user_data = conn;
7204 handle = cpu_to_le16(conn->handle);
7205
7206 /* Refresh RSSI each time */
7207 err = hci_read_rssi_sync(hdev, handle);
7208
7209 /* For LE links the TX power does not change, thus we don't need
7210 * to query for it once the value is known.
7211 */
7212 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7213 conn->tx_power == HCI_TX_POWER_INVALID))
7214 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7215
7216 /* Max TX power needs to be read only once per connection */
7217 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7218 err = hci_read_tx_power_sync(hdev, handle, 0x01);
7219
7220 return err;
7221 }
7222
7223 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7224 u16 len)
7225 {
7226 struct mgmt_cp_get_conn_info *cp = data;
7227 struct mgmt_rp_get_conn_info rp;
7228 struct hci_conn *conn;
7229 unsigned long conn_info_age;
7230 int err = 0;
7231
7232 bt_dev_dbg(hdev, "sock %p", sk);
7233
7234 memset(&rp, 0, sizeof(rp));
7235 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7236 rp.addr.type = cp->addr.type;
7237
7238 if (!bdaddr_type_is_valid(cp->addr.type))
7239 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7240 MGMT_STATUS_INVALID_PARAMS,
7241 &rp, sizeof(rp));
7242
7243 hci_dev_lock(hdev);
7244
7245 if (!hdev_is_powered(hdev)) {
7246 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7247 MGMT_STATUS_NOT_POWERED, &rp,
7248 sizeof(rp));
7249 goto unlock;
7250 }
7251
7252 if (cp->addr.type == BDADDR_BREDR)
7253 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7254 &cp->addr.bdaddr);
7255 else
7256 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7257
7258 if (!conn || conn->state != BT_CONNECTED) {
7259 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7260 MGMT_STATUS_NOT_CONNECTED, &rp,
7261 sizeof(rp));
7262 goto unlock;
7263 }
7264
7265 /* To avoid the client trying to guess when to poll again, calculate
7266 * the conn info age as a random value between the min/max set in hdev.
7267 */
7268 conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
7269 hdev->conn_info_max_age - 1);
7270
7271 /* Query controller to refresh cached values if they are too old or were
7272 * never read.
7273 */
7274 if (time_after(jiffies, conn->conn_info_timestamp +
7275 msecs_to_jiffies(conn_info_age)) ||
7276 !conn->conn_info_timestamp) {
7277 struct mgmt_pending_cmd *cmd;
7278
7279 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7280 len);
7281 if (!cmd) {
7282 err = -ENOMEM;
7283 } else {
7284 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7285 cmd, get_conn_info_complete);
7286 }
7287
7288 if (err < 0) {
7289 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7290 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7291
7292 if (cmd)
7293 mgmt_pending_free(cmd);
7294
7295 goto unlock;
7296 }
7297
7298 conn->conn_info_timestamp = jiffies;
7299 } else {
7300 /* Cache is valid, just reply with values cached in hci_conn */
7301 rp.rssi = conn->rssi;
7302 rp.tx_power = conn->tx_power;
7303 rp.max_tx_power = conn->max_tx_power;
7304
7305 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7306 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7307 }
7308
7309 unlock:
7310 hci_dev_unlock(hdev);
7311 return err;
7312 }
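/* Cache behaviour sketch for the handler above: assuming the default
 * conn_info_min_age/conn_info_max_age of 1000/3000 ms, cached RSSI and
 * TX power values younger than a randomly chosen 1-3 s threshold are
 * returned as-is, otherwise the controller is queried again via
 * get_conn_info_sync(). The randomization keeps clients from timing
 * their polls to the cache lifetime.
 */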
7313
7314 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7315 {
7316 struct mgmt_pending_cmd *cmd = data;
7317 struct mgmt_cp_get_clock_info *cp = cmd->param;
7318 struct mgmt_rp_get_clock_info rp;
7319 struct hci_conn *conn = cmd->user_data;
7320 u8 status = mgmt_status(err);
7321
7322 bt_dev_dbg(hdev, "err %d", err);
7323
7324 memset(&rp, 0, sizeof(rp));
7325 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7326 rp.addr.type = cp->addr.type;
7327
7328 if (err)
7329 goto complete;
7330
7331 rp.local_clock = cpu_to_le32(hdev->clock);
7332
7333 if (conn) {
7334 rp.piconet_clock = cpu_to_le32(conn->clock);
7335 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7336 }
7337
7338 complete:
7339 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status, &rp,
7340 sizeof(rp));
7341
7342 mgmt_pending_free(cmd);
7343 }
7344
7345 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7346 {
7347 struct mgmt_pending_cmd *cmd = data;
7348 struct mgmt_cp_get_clock_info *cp = cmd->param;
7349 struct hci_cp_read_clock hci_cp;
7350 struct hci_conn *conn;
7351
7352 memset(&hci_cp, 0, sizeof(hci_cp));
7353 hci_read_clock_sync(hdev, &hci_cp);
7354
7355 /* Make sure connection still exists */
7356 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7357 if (!conn || conn->state != BT_CONNECTED)
7358 return MGMT_STATUS_NOT_CONNECTED;
7359
7360 cmd->user_data = conn;
7361 hci_cp.handle = cpu_to_le16(conn->handle);
7362 hci_cp.which = 0x01; /* Piconet clock */
7363
7364 return hci_read_clock_sync(hdev, &hci_cp);
7365 }
7366
7367 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7368 u16 len)
7369 {
7370 struct mgmt_cp_get_clock_info *cp = data;
7371 struct mgmt_rp_get_clock_info rp;
7372 struct mgmt_pending_cmd *cmd;
7373 struct hci_conn *conn;
7374 int err;
7375
7376 bt_dev_dbg(hdev, "sock %p", sk);
7377
7378 memset(&rp, 0, sizeof(rp));
7379 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7380 rp.addr.type = cp->addr.type;
7381
7382 if (cp->addr.type != BDADDR_BREDR)
7383 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7384 MGMT_STATUS_INVALID_PARAMS,
7385 &rp, sizeof(rp));
7386
7387 hci_dev_lock(hdev);
7388
7389 if (!hdev_is_powered(hdev)) {
7390 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7391 MGMT_STATUS_NOT_POWERED, &rp,
7392 sizeof(rp));
7393 goto unlock;
7394 }
7395
7396 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7397 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7398 &cp->addr.bdaddr);
7399 if (!conn || conn->state != BT_CONNECTED) {
7400 err = mgmt_cmd_complete(sk, hdev->id,
7401 MGMT_OP_GET_CLOCK_INFO,
7402 MGMT_STATUS_NOT_CONNECTED,
7403 &rp, sizeof(rp));
7404 goto unlock;
7405 }
7406 } else {
7407 conn = NULL;
7408 }
7409
7410 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7411 if (!cmd)
7412 err = -ENOMEM;
7413 else
7414 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7415 get_clock_info_complete);
7416
7417 if (err < 0) {
7418 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7419 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7420
7421 if (cmd)
7422 mgmt_pending_free(cmd);
7423 }
7424
7425
7426 unlock:
7427 hci_dev_unlock(hdev);
7428 return err;
7429 }
7430
7431 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7432 {
7433 struct hci_conn *conn;
7434
7435 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7436 if (!conn)
7437 return false;
7438
7439 if (conn->dst_type != type)
7440 return false;
7441
7442 if (conn->state != BT_CONNECTED)
7443 return false;
7444
7445 return true;
7446 }
7447
7448 /* This function requires that the caller holds hdev->lock */
7449 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7450 u8 addr_type, u8 auto_connect)
7451 {
7452 struct hci_conn_params *params;
7453
7454 params = hci_conn_params_add(hdev, addr, addr_type);
7455 if (!params)
7456 return -EIO;
7457
7458 if (params->auto_connect == auto_connect)
7459 return 0;
7460
7461 hci_pend_le_list_del_init(params);
7462
7463 switch (auto_connect) {
7464 case HCI_AUTO_CONN_DISABLED:
7465 case HCI_AUTO_CONN_LINK_LOSS:
7466 /* If auto connect is being disabled while we're trying to
7467 * connect to a device, keep connecting.
7468 */
7469 if (params->explicit_connect)
7470 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7471 break;
7472 case HCI_AUTO_CONN_REPORT:
7473 if (params->explicit_connect)
7474 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7475 else
7476 hci_pend_le_list_add(params, &hdev->pend_le_reports);
7477 break;
7478 case HCI_AUTO_CONN_DIRECT:
7479 case HCI_AUTO_CONN_ALWAYS:
7480 if (!is_connected(hdev, addr, addr_type))
7481 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7482 break;
7483 }
7484
7485 params->auto_connect = auto_connect;
7486
7487 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7488 addr, addr_type, auto_connect);
7489
7490 return 0;
7491 }
7492
7493 static void device_added(struct sock *sk, struct hci_dev *hdev,
7494 bdaddr_t *bdaddr, u8 type, u8 action)
7495 {
7496 struct mgmt_ev_device_added ev;
7497
7498 bacpy(&ev.addr.bdaddr, bdaddr);
7499 ev.addr.type = type;
7500 ev.action = action;
7501
7502 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7503 }
7504
7505 static void add_device_complete(struct hci_dev *hdev, void *data, int err)
7506 {
7507 struct mgmt_pending_cmd *cmd = data;
7508 struct mgmt_cp_add_device *cp = cmd->param;
7509
7510 if (!err) {
7511 struct hci_conn_params *params;
7512
7513 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7514 le_addr_type(cp->addr.type));
7515
7516 device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
7517 cp->action);
7518 device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
7519 cp->addr.type, hdev->conn_flags,
7520 params ? params->flags : 0);
7521 }
7522
7523 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
7524 mgmt_status(err), &cp->addr, sizeof(cp->addr));
7525 mgmt_pending_free(cmd);
7526 }
7527
7528 static int add_device_sync(struct hci_dev *hdev, void *data)
7529 {
7530 return hci_update_passive_scan_sync(hdev);
7531 }
7532
7533 static int add_device(struct sock *sk, struct hci_dev *hdev,
7534 void *data, u16 len)
7535 {
7536 struct mgmt_pending_cmd *cmd;
7537 struct mgmt_cp_add_device *cp = data;
7538 u8 auto_conn, addr_type;
7539 struct hci_conn_params *params;
7540 int err;
7541 u32 current_flags = 0;
7542 u32 supported_flags;
7543
7544 bt_dev_dbg(hdev, "sock %p", sk);
7545
7546 if (!bdaddr_type_is_valid(cp->addr.type) ||
7547 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7548 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7549 MGMT_STATUS_INVALID_PARAMS,
7550 &cp->addr, sizeof(cp->addr));
7551
7552 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7553 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7554 MGMT_STATUS_INVALID_PARAMS,
7555 &cp->addr, sizeof(cp->addr));
7556
7557 hci_dev_lock(hdev);
7558
7559 if (cp->addr.type == BDADDR_BREDR) {
7560 /* Only the incoming-connections action (0x01) is supported for now */
7561 if (cp->action != 0x01) {
7562 err = mgmt_cmd_complete(sk, hdev->id,
7563 MGMT_OP_ADD_DEVICE,
7564 MGMT_STATUS_INVALID_PARAMS,
7565 &cp->addr, sizeof(cp->addr));
7566 goto unlock;
7567 }
7568
7569 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7570 &cp->addr.bdaddr,
7571 cp->addr.type, 0);
7572 if (err)
7573 goto unlock;
7574
7575 hci_update_scan(hdev);
7576
7577 goto added;
7578 }
7579
7580 addr_type = le_addr_type(cp->addr.type);
7581
7582 if (cp->action == 0x02)
7583 auto_conn = HCI_AUTO_CONN_ALWAYS;
7584 else if (cp->action == 0x01)
7585 auto_conn = HCI_AUTO_CONN_DIRECT;
7586 else
7587 auto_conn = HCI_AUTO_CONN_REPORT;
7588
7589 /* Kernel internally uses conn_params with resolvable private
7590 * address, but Add Device allows only identity addresses.
7591 * Make sure it is enforced before calling
7592 * hci_conn_params_lookup.
7593 */
7594 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7595 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7596 MGMT_STATUS_INVALID_PARAMS,
7597 &cp->addr, sizeof(cp->addr));
7598 goto unlock;
7599 }
7600
7601 /* If the connection parameters don't exist for this device,
7602 * they will be created and configured with defaults.
7603 */
7604 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7605 auto_conn) < 0) {
7606 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7607 MGMT_STATUS_FAILED, &cp->addr,
7608 sizeof(cp->addr));
7609 goto unlock;
7610 } else {
7611 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7612 addr_type);
7613 if (params)
7614 current_flags = params->flags;
7615 }
7616
7617 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
7618 if (!cmd) {
7619 err = -ENOMEM;
7620 goto unlock;
7621 }
7622
7623 err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
7624 add_device_complete);
7625 if (err < 0) {
7626 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7627 MGMT_STATUS_FAILED, &cp->addr,
7628 sizeof(cp->addr));
7629 mgmt_pending_free(cmd);
7630 }
7631
7632 goto unlock;
7633
7634 added:
7635 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7636 supported_flags = hdev->conn_flags;
7637 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7638 supported_flags, current_flags);
7639
7640 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7641 MGMT_STATUS_SUCCESS, &cp->addr,
7642 sizeof(cp->addr));
7643
7644 unlock:
7645 hci_dev_unlock(hdev);
7646 return err;
7647 }
7648
7649 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7650 bdaddr_t *bdaddr, u8 type)
7651 {
7652 struct mgmt_ev_device_removed ev;
7653
7654 bacpy(&ev.addr.bdaddr, bdaddr);
7655 ev.addr.type = type;
7656
7657 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7658 }
7659
7660 static int remove_device_sync(struct hci_dev *hdev, void *data)
7661 {
7662 return hci_update_passive_scan_sync(hdev);
7663 }
7664
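/* Remove Device (MGMT_OP_REMOVE_DEVICE) handler. A specific address
 * removes a single accept list or LE connection parameter entry, while
 * the wildcard BDADDR_ANY (with address type 0) flushes the whole
 * BR/EDR accept list and all LE connection parameters. Entries with an
 * in-progress explicit connect are downgraded to HCI_AUTO_CONN_EXPLICIT
 * instead of being freed, so the pending connection attempt survives.
 */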
7665 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7666 void *data, u16 len)
7667 {
7668 struct mgmt_cp_remove_device *cp = data;
7669 int err;
7670
7671 bt_dev_dbg(hdev, "sock %p", sk);
7672
7673 hci_dev_lock(hdev);
7674
7675 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7676 struct hci_conn_params *params;
7677 u8 addr_type;
7678
7679 if (!bdaddr_type_is_valid(cp->addr.type)) {
7680 err = mgmt_cmd_complete(sk, hdev->id,
7681 MGMT_OP_REMOVE_DEVICE,
7682 MGMT_STATUS_INVALID_PARAMS,
7683 &cp->addr, sizeof(cp->addr));
7684 goto unlock;
7685 }
7686
7687 if (cp->addr.type == BDADDR_BREDR) {
7688 err = hci_bdaddr_list_del(&hdev->accept_list,
7689 &cp->addr.bdaddr,
7690 cp->addr.type);
7691 if (err) {
7692 err = mgmt_cmd_complete(sk, hdev->id,
7693 MGMT_OP_REMOVE_DEVICE,
7694 MGMT_STATUS_INVALID_PARAMS,
7695 &cp->addr,
7696 sizeof(cp->addr));
7697 goto unlock;
7698 }
7699
7700 hci_update_scan(hdev);
7701
7702 device_removed(sk, hdev, &cp->addr.bdaddr,
7703 cp->addr.type);
7704 goto complete;
7705 }
7706
7707 addr_type = le_addr_type(cp->addr.type);
7708
7709 /* Kernel internally uses conn_params with resolvable private
7710 * address, but Remove Device allows only identity addresses.
7711 * Make sure it is enforced before calling
7712 * hci_conn_params_lookup.
7713 */
7714 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7715 err = mgmt_cmd_complete(sk, hdev->id,
7716 MGMT_OP_REMOVE_DEVICE,
7717 MGMT_STATUS_INVALID_PARAMS,
7718 &cp->addr, sizeof(cp->addr));
7719 goto unlock;
7720 }
7721
7722 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7723 addr_type);
7724 if (!params) {
7725 err = mgmt_cmd_complete(sk, hdev->id,
7726 MGMT_OP_REMOVE_DEVICE,
7727 MGMT_STATUS_INVALID_PARAMS,
7728 &cp->addr, sizeof(cp->addr));
7729 goto unlock;
7730 }
7731
7732 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7733 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7734 err = mgmt_cmd_complete(sk, hdev->id,
7735 MGMT_OP_REMOVE_DEVICE,
7736 MGMT_STATUS_INVALID_PARAMS,
7737 &cp->addr, sizeof(cp->addr));
7738 goto unlock;
7739 }
7740
7741 hci_conn_params_free(params);
7742
7743 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7744 } else {
7745 struct hci_conn_params *p, *tmp;
7746 struct bdaddr_list *b, *btmp;
7747
7748 if (cp->addr.type) {
7749 err = mgmt_cmd_complete(sk, hdev->id,
7750 MGMT_OP_REMOVE_DEVICE,
7751 MGMT_STATUS_INVALID_PARAMS,
7752 &cp->addr, sizeof(cp->addr));
7753 goto unlock;
7754 }
7755
7756 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7757 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7758 list_del(&b->list);
7759 kfree(b);
7760 }
7761
7762 hci_update_scan(hdev);
7763
7764 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7765 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7766 continue;
7767 device_removed(sk, hdev, &p->addr, p->addr_type);
7768 if (p->explicit_connect) {
7769 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7770 continue;
7771 }
7772 hci_conn_params_free(p);
7773 }
7774
7775 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7776 }
7777
7778 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7779
7780 complete:
7781 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7782 MGMT_STATUS_SUCCESS, &cp->addr,
7783 sizeof(cp->addr));
7784 unlock:
7785 hci_dev_unlock(hdev);
7786 return err;
7787 }
7788
7789 static int conn_update_sync(struct hci_dev *hdev, void *data)
7790 {
7791 struct hci_conn_params *params = data;
7792 struct hci_conn *conn;
7793
7794 conn = hci_conn_hash_lookup_le(hdev, &params->addr, params->addr_type);
7795 if (!conn)
7796 return -ECANCELED;
7797
7798 return hci_le_conn_update_sync(hdev, conn, params);
7799 }
7800
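/* Load Connection Parameters handler. The command carries a variable
 * number of mgmt_conn_param records, so the total length is validated
 * with struct_size() against param_count before any record is parsed.
 * When exactly one record refreshes an already known device, a
 * connection update is queued via conn_update_sync() for a matching
 * connection on which we are central.
 */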
7801 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7802 u16 len)
7803 {
7804 struct mgmt_cp_load_conn_param *cp = data;
7805 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7806 sizeof(struct mgmt_conn_param));
7807 u16 param_count, expected_len;
7808 int i;
7809
7810 if (!lmp_le_capable(hdev))
7811 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7812 MGMT_STATUS_NOT_SUPPORTED);
7813
7814 param_count = __le16_to_cpu(cp->param_count);
7815 if (param_count > max_param_count) {
7816 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7817 param_count);
7818 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7819 MGMT_STATUS_INVALID_PARAMS);
7820 }
7821
7822 expected_len = struct_size(cp, params, param_count);
7823 if (expected_len != len) {
7824 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7825 expected_len, len);
7826 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7827 MGMT_STATUS_INVALID_PARAMS);
7828 }
7829
7830 bt_dev_dbg(hdev, "param_count %u", param_count);
7831
7832 hci_dev_lock(hdev);
7833
7834 if (param_count > 1)
7835 hci_conn_params_clear_disabled(hdev);
7836
7837 for (i = 0; i < param_count; i++) {
7838 struct mgmt_conn_param *param = &cp->params[i];
7839 struct hci_conn_params *hci_param;
7840 u16 min, max, latency, timeout;
7841 bool update = false;
7842 u8 addr_type;
7843
7844 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
7845 param->addr.type);
7846
7847 if (param->addr.type == BDADDR_LE_PUBLIC) {
7848 addr_type = ADDR_LE_DEV_PUBLIC;
7849 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7850 addr_type = ADDR_LE_DEV_RANDOM;
7851 } else {
7852 bt_dev_err(hdev, "ignoring invalid connection parameters");
7853 continue;
7854 }
7855
7856 min = le16_to_cpu(param->min_interval);
7857 max = le16_to_cpu(param->max_interval);
7858 latency = le16_to_cpu(param->latency);
7859 timeout = le16_to_cpu(param->timeout);
7860
7861 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7862 min, max, latency, timeout);
7863
7864 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7865 bt_dev_err(hdev, "ignoring invalid connection parameters");
7866 continue;
7867 }
7868
7869 /* Detect when the load targets an existing parameter entry and,
7870  * if so, attempt to trigger the connection update procedure.
7871  */
7872 if (!i && param_count == 1) {
7873 hci_param = hci_conn_params_lookup(hdev,
7874 &param->addr.bdaddr,
7875 addr_type);
7876 if (hci_param)
7877 update = true;
7878 else
7879 hci_conn_params_clear_disabled(hdev);
7880 }
7881
7882 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7883 addr_type);
7884 if (!hci_param) {
7885 bt_dev_err(hdev, "failed to add connection parameters");
7886 continue;
7887 }
7888
7889 hci_param->conn_min_interval = min;
7890 hci_param->conn_max_interval = max;
7891 hci_param->conn_latency = latency;
7892 hci_param->supervision_timeout = timeout;
7893
7894 /* Check if we need to trigger a connection update */
7895 if (update) {
7896 struct hci_conn *conn;
7897
7898 /* Look up an existing connection on which we are central and,
7899  * if the parameters don't match, trigger a connection
7900  * update.
7901  */
7902 conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
7903 addr_type);
7904 if (conn && conn->role == HCI_ROLE_MASTER &&
7905 (conn->le_conn_min_interval != min ||
7906 conn->le_conn_max_interval != max ||
7907 conn->le_conn_latency != latency ||
7908 conn->le_supv_timeout != timeout))
7909 hci_cmd_sync_queue(hdev, conn_update_sync,
7910 hci_param, NULL);
7911 }
7912 }
7913
7914 hci_dev_unlock(hdev);
7915
7916 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
7917 NULL, 0);
7918 }
7919
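/* Set External Configuration handler. Toggling HCI_EXT_CONFIGURED can
 * move the controller between the configured and unconfigured index
 * lists; when that happens the index is removed and re-added (or
 * powered up through the HCI_CONFIG stage) so userspace observes a
 * clean transition.
 */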
7920 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7921 void *data, u16 len)
7922 {
7923 struct mgmt_cp_set_external_config *cp = data;
7924 bool changed;
7925 int err;
7926
7927 bt_dev_dbg(hdev, "sock %p", sk);
7928
7929 if (hdev_is_powered(hdev))
7930 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7931 MGMT_STATUS_REJECTED);
7932
7933 if (cp->config != 0x00 && cp->config != 0x01)
7934 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7935 MGMT_STATUS_INVALID_PARAMS);
7936
7937 if (!hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG))
7938 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7939 MGMT_STATUS_NOT_SUPPORTED);
7940
7941 hci_dev_lock(hdev);
7942
7943 if (cp->config)
7944 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7945 else
7946 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7947
7948 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7949 if (err < 0)
7950 goto unlock;
7951
7952 if (!changed)
7953 goto unlock;
7954
7955 err = new_options(hdev, sk);
7956
7957 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7958 mgmt_index_removed(hdev);
7959
7960 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7961 hci_dev_set_flag(hdev, HCI_CONFIG);
7962 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7963
7964 queue_work(hdev->req_workqueue, &hdev->power_on);
7965 } else {
7966 set_bit(HCI_RAW, &hdev->flags);
7967 mgmt_index_added(hdev);
7968 }
7969 }
7970
7971 unlock:
7972 hci_dev_unlock(hdev);
7973 return err;
7974 }
7975
7976 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
7977 void *data, u16 len)
7978 {
7979 struct mgmt_cp_set_public_address *cp = data;
7980 bool changed;
7981 int err;
7982
7983 bt_dev_dbg(hdev, "sock %p", sk);
7984
7985 if (hdev_is_powered(hdev))
7986 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7987 MGMT_STATUS_REJECTED);
7988
7989 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
7990 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7991 MGMT_STATUS_INVALID_PARAMS);
7992
7993 if (!hdev->set_bdaddr)
7994 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7995 MGMT_STATUS_NOT_SUPPORTED);
7996
7997 hci_dev_lock(hdev);
7998
7999 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
8000 bacpy(&hdev->public_addr, &cp->bdaddr);
8001
8002 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8003 if (err < 0)
8004 goto unlock;
8005
8006 if (!changed)
8007 goto unlock;
8008
8009 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8010 err = new_options(hdev, sk);
8011
8012 if (is_configured(hdev)) {
8013 mgmt_index_removed(hdev);
8014
8015 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8016
8017 hci_dev_set_flag(hdev, HCI_CONFIG);
8018 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8019
8020 queue_work(hdev->req_workqueue, &hdev->power_on);
8021 }
8022
8023 unlock:
8024 hci_dev_unlock(hdev);
8025 return err;
8026 }
8027
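/* Completion handler for Read Local OOB Extended Data. The reply is
 * assembled from EIR-encoded fields, so each precomputed eir_len is a
 * sum of len(1) + type(1) + payload sizes:
 *
 *   5  = Class of Device field        (1 + 1 + 3)
 *   18 = one 16-byte hash/randomizer  (1 + 1 + 16)
 *
 * giving 5 + 18 + 18 when only one hash/randomizer pair is present
 * (P-192 only, or P-256 only in SC-only mode) and 5 + 4 * 18 when both
 * the P-192 and P-256 pairs are included.
 */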
8028 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8029 int err)
8030 {
8031 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8032 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8033 u8 *h192, *r192, *h256, *r256;
8034 struct mgmt_pending_cmd *cmd = data;
8035 struct sk_buff *skb = cmd->skb;
8036 u8 status = mgmt_status(err);
8037 u16 eir_len;
8038
8039 if (err == -ECANCELED ||
8040 cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
8041 return;
8042
8043 if (!status) {
8044 if (!skb)
8045 status = MGMT_STATUS_FAILED;
8046 else if (IS_ERR(skb))
8047 status = mgmt_status(PTR_ERR(skb));
8048 else
8049 status = mgmt_status(skb->data[0]);
8050 }
8051
8052 bt_dev_dbg(hdev, "status %u", status);
8053
8054 mgmt_cp = cmd->param;
8055
8056 if (status) {
8057 status = mgmt_status(status);
8058 eir_len = 0;
8059
8060 h192 = NULL;
8061 r192 = NULL;
8062 h256 = NULL;
8063 r256 = NULL;
8064 } else if (!bredr_sc_enabled(hdev)) {
8065 struct hci_rp_read_local_oob_data *rp;
8066
8067 if (skb->len != sizeof(*rp)) {
8068 status = MGMT_STATUS_FAILED;
8069 eir_len = 0;
8070 } else {
8071 status = MGMT_STATUS_SUCCESS;
8072 rp = (void *)skb->data;
8073
8074 eir_len = 5 + 18 + 18;
8075 h192 = rp->hash;
8076 r192 = rp->rand;
8077 h256 = NULL;
8078 r256 = NULL;
8079 }
8080 } else {
8081 struct hci_rp_read_local_oob_ext_data *rp;
8082
8083 if (skb->len != sizeof(*rp)) {
8084 status = MGMT_STATUS_FAILED;
8085 eir_len = 0;
8086 } else {
8087 status = MGMT_STATUS_SUCCESS;
8088 rp = (void *)skb->data;
8089
8090 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8091 eir_len = 5 + 18 + 18;
8092 h192 = NULL;
8093 r192 = NULL;
8094 } else {
8095 eir_len = 5 + 18 + 18 + 18 + 18;
8096 h192 = rp->hash192;
8097 r192 = rp->rand192;
8098 }
8099
8100 h256 = rp->hash256;
8101 r256 = rp->rand256;
8102 }
8103 }
8104
8105 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8106 if (!mgmt_rp)
8107 goto done;
8108
8109 if (eir_len == 0)
8110 goto send_rsp;
8111
8112 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8113 hdev->dev_class, 3);
8114
8115 if (h192 && r192) {
8116 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8117 EIR_SSP_HASH_C192, h192, 16);
8118 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8119 EIR_SSP_RAND_R192, r192, 16);
8120 }
8121
8122 if (h256 && r256) {
8123 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8124 EIR_SSP_HASH_C256, h256, 16);
8125 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8126 EIR_SSP_RAND_R256, r256, 16);
8127 }
8128
8129 send_rsp:
8130 mgmt_rp->type = mgmt_cp->type;
8131 mgmt_rp->eir_len = cpu_to_le16(eir_len);
8132
8133 err = mgmt_cmd_complete(cmd->sk, hdev->id,
8134 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8135 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8136 if (err < 0 || status)
8137 goto done;
8138
8139 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8140
8141 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8142 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8143 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8144 done:
8145 if (skb && !IS_ERR(skb))
8146 kfree_skb(skb);
8147
8148 kfree(mgmt_rp);
8149 mgmt_pending_remove(cmd);
8150 }
8151
8152 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8153 struct mgmt_cp_read_local_oob_ext_data *cp)
8154 {
8155 struct mgmt_pending_cmd *cmd;
8156 int err;
8157
8158 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8159 cp, sizeof(*cp));
8160 if (!cmd)
8161 return -ENOMEM;
8162
8163 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8164 read_local_oob_ext_data_complete);
8165
8166 if (err < 0) {
8167 mgmt_pending_remove(cmd);
8168 return err;
8169 }
8170
8171 return 0;
8172 }
8173
8174 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8175 void *data, u16 data_len)
8176 {
8177 struct mgmt_cp_read_local_oob_ext_data *cp = data;
8178 struct mgmt_rp_read_local_oob_ext_data *rp;
8179 size_t rp_len;
8180 u16 eir_len;
8181 u8 status, flags, role, addr[7], hash[16], rand[16];
8182 int err;
8183
8184 bt_dev_dbg(hdev, "sock %p", sk);
8185
8186 if (hdev_is_powered(hdev)) {
8187 switch (cp->type) {
8188 case BIT(BDADDR_BREDR):
8189 status = mgmt_bredr_support(hdev);
8190 if (status)
8191 eir_len = 0;
8192 else
8193 eir_len = 5;
8194 break;
8195 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8196 status = mgmt_le_support(hdev);
8197 if (status)
8198 eir_len = 0;
8199 else
8200 eir_len = 9 + 3 + 18 + 18 + 3;
8201 break;
8202 default:
8203 status = MGMT_STATUS_INVALID_PARAMS;
8204 eir_len = 0;
8205 break;
8206 }
8207 } else {
8208 status = MGMT_STATUS_NOT_POWERED;
8209 eir_len = 0;
8210 }
8211
8212 rp_len = sizeof(*rp) + eir_len;
8213 rp = kmalloc(rp_len, GFP_ATOMIC);
8214 if (!rp)
8215 return -ENOMEM;
8216
8217 if (!status && !lmp_ssp_capable(hdev)) {
8218 status = MGMT_STATUS_NOT_SUPPORTED;
8219 eir_len = 0;
8220 }
8221
8222 if (status)
8223 goto complete;
8224
8225 hci_dev_lock(hdev);
8226
8227 eir_len = 0;
8228 switch (cp->type) {
8229 case BIT(BDADDR_BREDR):
8230 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8231 err = read_local_ssp_oob_req(hdev, sk, cp);
8232 hci_dev_unlock(hdev);
8233 if (!err)
8234 goto done;
8235
8236 status = MGMT_STATUS_FAILED;
8237 goto complete;
8238 } else {
8239 eir_len = eir_append_data(rp->eir, eir_len,
8240 EIR_CLASS_OF_DEV,
8241 hdev->dev_class, 3);
8242 }
8243 break;
8244 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8245 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8246 smp_generate_oob(hdev, hash, rand) < 0) {
8247 hci_dev_unlock(hdev);
8248 status = MGMT_STATUS_FAILED;
8249 goto complete;
8250 }
8251
8252 /* This should return the active RPA, but since the RPA
8253 * is only programmed on demand, it is really hard to fill
8254 * this in at the moment. For now disallow retrieving
8255 * local out-of-band data when privacy is in use.
8256 *
8257 * Returning the identity address will not help here since
8258 * pairing happens before the identity resolving key is
8259 * known and thus the connection establishment happens
8260 * based on the RPA and not the identity address.
8261 */
8262 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8263 hci_dev_unlock(hdev);
8264 status = MGMT_STATUS_REJECTED;
8265 goto complete;
8266 }
8267
8268 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8269 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8270 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8271 bacmp(&hdev->static_addr, BDADDR_ANY))) {
8272 memcpy(addr, &hdev->static_addr, 6);
8273 addr[6] = 0x01;
8274 } else {
8275 memcpy(addr, &hdev->bdaddr, 6);
8276 addr[6] = 0x00;
8277 }
8278
8279 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8280 addr, sizeof(addr));
8281
8282 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8283 role = 0x02;
8284 else
8285 role = 0x01;
8286
8287 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8288 &role, sizeof(role));
8289
8290 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8291 eir_len = eir_append_data(rp->eir, eir_len,
8292 EIR_LE_SC_CONFIRM,
8293 hash, sizeof(hash));
8294
8295 eir_len = eir_append_data(rp->eir, eir_len,
8296 EIR_LE_SC_RANDOM,
8297 rand, sizeof(rand));
8298 }
8299
8300 flags = mgmt_get_adv_discov_flags(hdev);
8301
8302 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8303 flags |= LE_AD_NO_BREDR;
8304
8305 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8306 &flags, sizeof(flags));
8307 break;
8308 }
8309
8310 hci_dev_unlock(hdev);
8311
8312 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8313
8314 status = MGMT_STATUS_SUCCESS;
8315
8316 complete:
8317 rp->type = cp->type;
8318 rp->eir_len = cpu_to_le16(eir_len);
8319
8320 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8321 status, rp, sizeof(*rp) + eir_len);
8322 if (err < 0 || status)
8323 goto done;
8324
8325 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8326 rp, sizeof(*rp) + eir_len,
8327 HCI_MGMT_OOB_DATA_EVENTS, sk);
8328
8329 done:
8330 kfree(rp);
8331
8332 return err;
8333 }
8334
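/* Report which advertising flag and parameter bits this controller can
 * honour. The baseline set is always offered; TX power reporting and
 * the secondary PHY bits additionally depend on controller
 * capabilities such as extended advertising support.
 */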
8335 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8336 {
8337 u32 flags = 0;
8338
8339 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8340 flags |= MGMT_ADV_FLAG_DISCOV;
8341 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8342 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8343 flags |= MGMT_ADV_FLAG_APPEARANCE;
8344 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8345 flags |= MGMT_ADV_PARAM_DURATION;
8346 flags |= MGMT_ADV_PARAM_TIMEOUT;
8347 flags |= MGMT_ADV_PARAM_INTERVALS;
8348 flags |= MGMT_ADV_PARAM_TX_POWER;
8349 flags |= MGMT_ADV_PARAM_SCAN_RSP;
8350
8351 /* With extended advertising, the TX_POWER returned from
8352  * Set Adv Param will always be valid.
8353  */
8354 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8355 flags |= MGMT_ADV_FLAG_TX_POWER;
8356
8357 if (ext_adv_capable(hdev)) {
8358 flags |= MGMT_ADV_FLAG_SEC_1M;
8359 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8360 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8361
8362 if (le_2m_capable(hdev))
8363 flags |= MGMT_ADV_FLAG_SEC_2M;
8364
8365 if (le_coded_capable(hdev))
8366 flags |= MGMT_ADV_FLAG_SEC_CODED;
8367 }
8368
8369 return flags;
8370 }
8371
8372 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8373 void *data, u16 data_len)
8374 {
8375 struct mgmt_rp_read_adv_features *rp;
8376 size_t rp_len;
8377 int err;
8378 struct adv_info *adv_instance;
8379 u32 supported_flags;
8380 u8 *instance;
8381
8382 bt_dev_dbg(hdev, "sock %p", sk);
8383
8384 if (!lmp_le_capable(hdev))
8385 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8386 MGMT_STATUS_REJECTED);
8387
8388 hci_dev_lock(hdev);
8389
8390 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8391 rp = kmalloc(rp_len, GFP_ATOMIC);
8392 if (!rp) {
8393 hci_dev_unlock(hdev);
8394 return -ENOMEM;
8395 }
8396
8397 supported_flags = get_supported_adv_flags(hdev);
8398
8399 rp->supported_flags = cpu_to_le32(supported_flags);
8400 rp->max_adv_data_len = max_adv_len(hdev);
8401 rp->max_scan_rsp_len = max_adv_len(hdev);
8402 rp->max_instances = hdev->le_num_of_adv_sets;
8403 rp->num_instances = hdev->adv_instance_cnt;
8404
8405 instance = rp->instance;
8406 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8407 /* Only instances 1-le_num_of_adv_sets are externally visible */
8408 if (adv_instance->instance <= hdev->adv_instance_cnt) {
8409 *instance = adv_instance->instance;
8410 instance++;
8411 } else {
8412 rp->num_instances--;
8413 rp_len--;
8414 }
8415 }
8416
8417 hci_dev_unlock(hdev);
8418
8419 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8420 MGMT_STATUS_SUCCESS, rp, rp_len);
8421
8422 kfree(rp);
8423
8424 return err;
8425 }
8426
8427 static u8 calculate_name_len(struct hci_dev *hdev)
8428 {
8429 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8430
8431 return eir_append_local_name(hdev, buf, 0);
8432 }
8433
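/* Compute how many bytes of user-supplied TLV data fit in the
 * advertising or scan response payload once kernel-managed fields are
 * reserved, each costing len(1) + type(1) + payload:
 *
 *   3 bytes for Flags        (1 + 1 + 1)
 *   3 bytes for TX power     (1 + 1 + 1)
 *   4 bytes for Appearance   (1 + 1 + 2)
 *
 * plus the encoded local name length for scan response data.
 */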
8434 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8435 bool is_adv_data)
8436 {
8437 u8 max_len = max_adv_len(hdev);
8438
8439 if (is_adv_data) {
8440 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8441 MGMT_ADV_FLAG_LIMITED_DISCOV |
8442 MGMT_ADV_FLAG_MANAGED_FLAGS))
8443 max_len -= 3;
8444
8445 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8446 max_len -= 3;
8447 } else {
8448 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8449 max_len -= calculate_name_len(hdev);
8450
8451 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8452 max_len -= 4;
8453 }
8454
8455 return max_len;
8456 }
8457
8458 static bool flags_managed(u32 adv_flags)
8459 {
8460 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8461 MGMT_ADV_FLAG_LIMITED_DISCOV |
8462 MGMT_ADV_FLAG_MANAGED_FLAGS);
8463 }
8464
8465 static bool tx_power_managed(u32 adv_flags)
8466 {
8467 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8468 }
8469
8470 static bool name_managed(u32 adv_flags)
8471 {
8472 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8473 }
8474
8475 static bool appearance_managed(u32 adv_flags)
8476 {
8477 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8478 }
8479
8480 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8481 u8 len, bool is_adv_data)
8482 {
8483 int i, cur_len;
8484 u8 max_len;
8485
8486 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8487
8488 if (len > max_len)
8489 return false;
8490
8491 /* Make sure that the data is correctly formatted. */
8492 for (i = 0; i < len; i += (cur_len + 1)) {
8493 cur_len = data[i];
8494
8495 if (!cur_len)
8496 continue;
8497
8498 if (data[i + 1] == EIR_FLAGS &&
8499 (!is_adv_data || flags_managed(adv_flags)))
8500 return false;
8501
8502 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8503 return false;
8504
8505 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8506 return false;
8507
8508 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8509 return false;
8510
8511 if (data[i + 1] == EIR_APPEARANCE &&
8512 appearance_managed(adv_flags))
8513 return false;
8514
8515 /* If the current field length would exceed the total data
8516 * length, then it's invalid.
8517 */
8518 if (i + cur_len >= len)
8519 return false;
8520 }
8521
8522 return true;
8523 }
8524
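/* The expression (phy_flags ^ (phy_flags & -phy_flags)) below clears
 * the lowest set bit and is therefore non-zero exactly when two or
 * more secondary PHY bits are requested at once, e.g.:
 *
 *   phy_flags              = 0b0110   two SEC_* bits set
 *   phy_flags & -phy_flags = 0b0010   lowest set bit isolated
 *   xor                    = 0b0100   non-zero, so rejected
 */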
8525 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8526 {
8527 u32 supported_flags, phy_flags;
8528
8529 /* The current implementation only supports a subset of the specified
8530  * flags; the secondary PHY flags must also be mutually exclusive.
8531  */
8532 supported_flags = get_supported_adv_flags(hdev);
8533 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8534 if (adv_flags & ~supported_flags ||
8535 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8536 return false;
8537
8538 return true;
8539 }
8540
8541 static bool adv_busy(struct hci_dev *hdev)
8542 {
8543 return pending_find(MGMT_OP_SET_LE, hdev);
8544 }
8545
8546 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8547 int err)
8548 {
8549 struct adv_info *adv, *n;
8550
8551 bt_dev_dbg(hdev, "err %d", err);
8552
8553 hci_dev_lock(hdev);
8554
8555 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8556 u8 instance;
8557
8558 if (!adv->pending)
8559 continue;
8560
8561 if (!err) {
8562 adv->pending = false;
8563 continue;
8564 }
8565
8566 instance = adv->instance;
8567
8568 if (hdev->cur_adv_instance == instance)
8569 cancel_adv_timeout(hdev);
8570
8571 hci_remove_adv_instance(hdev, instance);
8572 mgmt_advertising_removed(sk, hdev, instance);
8573 }
8574
8575 hci_dev_unlock(hdev);
8576 }
8577
8578 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8579 {
8580 struct mgmt_pending_cmd *cmd = data;
8581 struct mgmt_cp_add_advertising *cp = cmd->param;
8582 struct mgmt_rp_add_advertising rp;
8583
8584 memset(&rp, 0, sizeof(rp));
8585
8586 rp.instance = cp->instance;
8587
8588 if (err)
8589 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
8590 mgmt_status(err));
8591 else
8592 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
8593 mgmt_status(err), &rp, sizeof(rp));
8594
8595 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8596
8597 mgmt_pending_free(cmd);
8598 }
8599
8600 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8601 {
8602 struct mgmt_pending_cmd *cmd = data;
8603 struct mgmt_cp_add_advertising *cp = cmd->param;
8604
8605 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8606 }
8607
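/* Add Advertising handler. Registers (or replaces) an advertising
 * instance and decides what to schedule next: replacing the instance
 * currently on air cancels the rotation timeout and moves to the next
 * instance, while a new instance is advertised immediately if no other
 * instance is running. If the controller is unpowered or the legacy
 * HCI_ADVERTISING flag is active, the data is stored without any HCI
 * traffic.
 */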
8608 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8609 void *data, u16 data_len)
8610 {
8611 struct mgmt_cp_add_advertising *cp = data;
8612 struct mgmt_rp_add_advertising rp;
8613 u32 flags;
8614 u8 status;
8615 u16 timeout, duration;
8616 unsigned int prev_instance_cnt;
8617 u8 schedule_instance = 0;
8618 struct adv_info *adv, *next_instance;
8619 int err;
8620 struct mgmt_pending_cmd *cmd;
8621
8622 bt_dev_dbg(hdev, "sock %p", sk);
8623
8624 status = mgmt_le_support(hdev);
8625 if (status)
8626 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8627 status);
8628
8629 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8630 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8631 MGMT_STATUS_INVALID_PARAMS);
8632
8633 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8634 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8635 MGMT_STATUS_INVALID_PARAMS);
8636
8637 flags = __le32_to_cpu(cp->flags);
8638 timeout = __le16_to_cpu(cp->timeout);
8639 duration = __le16_to_cpu(cp->duration);
8640
8641 if (!requested_adv_flags_are_valid(hdev, flags))
8642 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8643 MGMT_STATUS_INVALID_PARAMS);
8644
8645 hci_dev_lock(hdev);
8646
8647 if (timeout && !hdev_is_powered(hdev)) {
8648 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8649 MGMT_STATUS_REJECTED);
8650 goto unlock;
8651 }
8652
8653 if (adv_busy(hdev)) {
8654 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8655 MGMT_STATUS_BUSY);
8656 goto unlock;
8657 }
8658
8659 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8660 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8661 cp->scan_rsp_len, false)) {
8662 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8663 MGMT_STATUS_INVALID_PARAMS);
8664 goto unlock;
8665 }
8666
8667 prev_instance_cnt = hdev->adv_instance_cnt;
8668
8669 adv = hci_add_adv_instance(hdev, cp->instance, flags,
8670 cp->adv_data_len, cp->data,
8671 cp->scan_rsp_len,
8672 cp->data + cp->adv_data_len,
8673 timeout, duration,
8674 HCI_ADV_TX_POWER_NO_PREFERENCE,
8675 hdev->le_adv_min_interval,
8676 hdev->le_adv_max_interval, 0);
8677 if (IS_ERR(adv)) {
8678 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8679 MGMT_STATUS_FAILED);
8680 goto unlock;
8681 }
8682
8683 /* Only trigger an advertising added event if a new instance was
8684 * actually added.
8685 */
8686 if (hdev->adv_instance_cnt > prev_instance_cnt)
8687 mgmt_advertising_added(sk, hdev, cp->instance);
8688
8689 if (hdev->cur_adv_instance == cp->instance) {
8690 /* If the currently advertised instance is being changed then
8691 * cancel the current advertising and schedule the next
8692 * instance. If there is only one instance then the overridden
8693 * advertising data will be visible right away.
8694 */
8695 cancel_adv_timeout(hdev);
8696
8697 next_instance = hci_get_next_instance(hdev, cp->instance);
8698 if (next_instance)
8699 schedule_instance = next_instance->instance;
8700 } else if (!hdev->adv_instance_timeout) {
8701 /* Immediately advertise the new instance if no other
8702 * instance is currently being advertised.
8703 */
8704 schedule_instance = cp->instance;
8705 }
8706
8707 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8708 * there is no instance to be advertised then we have no HCI
8709 * communication to make. Simply return.
8710 */
8711 if (!hdev_is_powered(hdev) ||
8712 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8713 !schedule_instance) {
8714 rp.instance = cp->instance;
8715 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8716 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8717 goto unlock;
8718 }
8719
8720 /* We're good to go, update advertising data, parameters, and start
8721 * advertising.
8722 */
8723 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8724 data_len);
8725 if (!cmd) {
8726 err = -ENOMEM;
8727 goto unlock;
8728 }
8729
8730 cp->instance = schedule_instance;
8731
8732 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8733 add_advertising_complete);
8734 if (err < 0)
8735 mgmt_pending_free(cmd);
8736
8737 unlock:
8738 hci_dev_unlock(hdev);
8739
8740 return err;
8741 }
8742
8743 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8744 int err)
8745 {
8746 struct mgmt_pending_cmd *cmd = data;
8747 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8748 struct mgmt_rp_add_ext_adv_params rp;
8749 struct adv_info *adv;
8750 u32 flags;
8751
8752 BT_DBG("%s", hdev->name);
8753
8754 hci_dev_lock(hdev);
8755
8756 adv = hci_find_adv_instance(hdev, cp->instance);
8757 if (!adv)
8758 goto unlock;
8759
8760 rp.instance = cp->instance;
8761 rp.tx_power = adv->tx_power;
8762
8763 /* While we're at it, inform userspace of the available space for this
8764 * advertisement, given the flags that will be used.
8765 */
8766 flags = __le32_to_cpu(cp->flags);
8767 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8768 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8769
8770 if (err) {
8771 /* If this advertisement was previously advertising and we
8772 * failed to update it, we signal that it has been removed and
8773 * delete its structure.
8774 */
8775 if (!adv->pending)
8776 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8777
8778 hci_remove_adv_instance(hdev, cp->instance);
8779
8780 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
8781 mgmt_status(err));
8782 } else {
8783 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
8784 mgmt_status(err), &rp, sizeof(rp));
8785 }
8786
8787 unlock:
8788 mgmt_pending_free(cmd);
8789
8790 hci_dev_unlock(hdev);
8791 }
8792
8793 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8794 {
8795 struct mgmt_pending_cmd *cmd = data;
8796 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8797
8798 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8799 }
8800
8801 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8802 void *data, u16 data_len)
8803 {
8804 struct mgmt_cp_add_ext_adv_params *cp = data;
8805 struct mgmt_rp_add_ext_adv_params rp;
8806 struct mgmt_pending_cmd *cmd = NULL;
8807 struct adv_info *adv;
8808 u32 flags, min_interval, max_interval;
8809 u16 timeout, duration;
8810 u8 status;
8811 s8 tx_power;
8812 int err;
8813
8814 BT_DBG("%s", hdev->name);
8815
8816 status = mgmt_le_support(hdev);
8817 if (status)
8818 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8819 status);
8820
8821 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8822 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8823 MGMT_STATUS_INVALID_PARAMS);
8824
8825 /* The purpose of breaking add_advertising into two separate MGMT calls
8826 * for params and data is to allow more parameters to be added to this
8827 * structure in the future. For this reason, we verify that we have the
8828 * bare minimum structure we know of when the interface was defined. Any
8829 * extra parameters we don't know about will be ignored in this request.
8830 */
8831 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8832 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8833 MGMT_STATUS_INVALID_PARAMS);
8834
8835 flags = __le32_to_cpu(cp->flags);
8836
8837 if (!requested_adv_flags_are_valid(hdev, flags))
8838 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8839 MGMT_STATUS_INVALID_PARAMS);
8840
8841 hci_dev_lock(hdev);
8842
8843 /* In the new interface, we require that the controller is powered to register */
8844 if (!hdev_is_powered(hdev)) {
8845 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8846 MGMT_STATUS_REJECTED);
8847 goto unlock;
8848 }
8849
8850 if (adv_busy(hdev)) {
8851 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8852 MGMT_STATUS_BUSY);
8853 goto unlock;
8854 }
8855
8856 /* Parse defined parameters from request, use defaults otherwise */
8857 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8858 __le16_to_cpu(cp->timeout) : 0;
8859
8860 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8861 __le16_to_cpu(cp->duration) :
8862 hdev->def_multi_adv_rotation_duration;
8863
8864 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8865 __le32_to_cpu(cp->min_interval) :
8866 hdev->le_adv_min_interval;
8867
8868 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8869 __le32_to_cpu(cp->max_interval) :
8870 hdev->le_adv_max_interval;
8871
8872 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8873 cp->tx_power :
8874 HCI_ADV_TX_POWER_NO_PREFERENCE;
8875
8876 /* Create advertising instance with no advertising or response data */
8877 adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
8878 timeout, duration, tx_power, min_interval,
8879 max_interval, 0);
8880
8881 if (IS_ERR(adv)) {
8882 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8883 MGMT_STATUS_FAILED);
8884 goto unlock;
8885 }
8886
8887 /* Submit request for advertising params if ext adv available */
8888 if (ext_adv_capable(hdev)) {
8889 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8890 data, data_len);
8891 if (!cmd) {
8892 err = -ENOMEM;
8893 hci_remove_adv_instance(hdev, cp->instance);
8894 goto unlock;
8895 }
8896
8897 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8898 add_ext_adv_params_complete);
8899 if (err < 0)
8900 mgmt_pending_free(cmd);
8901 } else {
8902 rp.instance = cp->instance;
8903 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8904 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8905 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8906 err = mgmt_cmd_complete(sk, hdev->id,
8907 MGMT_OP_ADD_EXT_ADV_PARAMS,
8908 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8909 }
8910
8911 unlock:
8912 hci_dev_unlock(hdev);
8913
8914 return err;
8915 }
8916
8917 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8918 {
8919 struct mgmt_pending_cmd *cmd = data;
8920 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8921 struct mgmt_rp_add_advertising rp;
8922
8923 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8924
8925 memset(&rp, 0, sizeof(rp));
8926
8927 rp.instance = cp->instance;
8928
8929 if (err)
8930 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
8931 mgmt_status(err));
8932 else
8933 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
8934 mgmt_status(err), &rp, sizeof(rp));
8935
8936 mgmt_pending_free(cmd);
8937 }
8938
8939 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8940 {
8941 struct mgmt_pending_cmd *cmd = data;
8942 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8943 int err;
8944
8945 if (ext_adv_capable(hdev)) {
8946 err = hci_update_adv_data_sync(hdev, cp->instance);
8947 if (err)
8948 return err;
8949
8950 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8951 if (err)
8952 return err;
8953
8954 return hci_enable_ext_advertising_sync(hdev, cp->instance);
8955 }
8956
8957 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8958 }
8959
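/* Add Extended Advertising Data handler: the second half of the
 * two-call interface started by add_ext_adv_params(). The instance
 * must already exist; its advertising and scan response payloads are
 * validated against the instance flags, stored, and the instance is
 * then enabled (extended advertising) or scheduled for software
 * rotation.
 */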
8960 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
8961 u16 data_len)
8962 {
8963 struct mgmt_cp_add_ext_adv_data *cp = data;
8964 struct mgmt_rp_add_ext_adv_data rp;
8965 u8 schedule_instance = 0;
8966 struct adv_info *next_instance;
8967 struct adv_info *adv_instance;
8968 int err = 0;
8969 struct mgmt_pending_cmd *cmd;
8970
8971 BT_DBG("%s", hdev->name);
8972
8973 hci_dev_lock(hdev);
8974
8975 adv_instance = hci_find_adv_instance(hdev, cp->instance);
8976
8977 if (!adv_instance) {
8978 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8979 MGMT_STATUS_INVALID_PARAMS);
8980 goto unlock;
8981 }
8982
8983 /* In the new interface, we require that the controller is powered to register */
8984 if (!hdev_is_powered(hdev)) {
8985 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8986 MGMT_STATUS_REJECTED);
8987 goto clear_new_instance;
8988 }
8989
8990 if (adv_busy(hdev)) {
8991 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8992 MGMT_STATUS_BUSY);
8993 goto clear_new_instance;
8994 }
8995
8996 /* Validate new data */
8997 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
8998 cp->adv_data_len, true) ||
8999 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
9000 cp->adv_data_len, cp->scan_rsp_len, false)) {
9001 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9002 MGMT_STATUS_INVALID_PARAMS);
9003 goto clear_new_instance;
9004 }
9005
9006 /* Set the data in the advertising instance */
9007 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
9008 cp->data, cp->scan_rsp_len,
9009 cp->data + cp->adv_data_len);
9010
9011 /* If using software rotation, determine next instance to use */
9012 if (hdev->cur_adv_instance == cp->instance) {
9013 /* If the currently advertised instance is being changed
9014 * then cancel the current advertising and schedule the
9015 * next instance. If there is only one instance then the
9016 * overridden advertising data will be visible right
9017 * away.
9018 */
9019 cancel_adv_timeout(hdev);
9020
9021 next_instance = hci_get_next_instance(hdev, cp->instance);
9022 if (next_instance)
9023 schedule_instance = next_instance->instance;
9024 } else if (!hdev->adv_instance_timeout) {
9025 /* Immediately advertise the new instance if no other
9026 * instance is currently being advertised.
9027 */
9028 schedule_instance = cp->instance;
9029 }
9030
9031 /* If the HCI_ADVERTISING flag is set or there is no instance to
9032 * be advertised then we have no HCI communication to make.
9033 * Simply return.
9034 */
9035 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
9036 if (adv_instance->pending) {
9037 mgmt_advertising_added(sk, hdev, cp->instance);
9038 adv_instance->pending = false;
9039 }
9040 rp.instance = cp->instance;
9041 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9042 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9043 goto unlock;
9044 }
9045
9046 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9047 data_len);
9048 if (!cmd) {
9049 err = -ENOMEM;
9050 goto clear_new_instance;
9051 }
9052
9053 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
9054 add_ext_adv_data_complete);
9055 if (err < 0) {
9056 mgmt_pending_free(cmd);
9057 goto clear_new_instance;
9058 }
9059
9060 /* We were successful in updating data, so trigger advertising_added
9061 * event if this is an instance that wasn't previously advertising. If
9062 * a failure occurs in the requests we initiated, we will remove the
9063 * instance again in add_advertising_complete
9064 */
9065 if (adv_instance->pending)
9066 mgmt_advertising_added(sk, hdev, cp->instance);
9067
9068 goto unlock;
9069
9070 clear_new_instance:
9071 hci_remove_adv_instance(hdev, cp->instance);
9072
9073 unlock:
9074 hci_dev_unlock(hdev);
9075
9076 return err;
9077 }
9078
9079 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9080 int err)
9081 {
9082 struct mgmt_pending_cmd *cmd = data;
9083 struct mgmt_cp_remove_advertising *cp = cmd->param;
9084 struct mgmt_rp_remove_advertising rp;
9085
9086 bt_dev_dbg(hdev, "err %d", err);
9087
9088 memset(&rp, 0, sizeof(rp));
9089 rp.instance = cp->instance;
9090
9091 if (err)
9092 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
9093 mgmt_status(err));
9094 else
9095 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
9096 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9097
9098 mgmt_pending_free(cmd);
9099 }
9100
9101 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9102 {
9103 struct mgmt_pending_cmd *cmd = data;
9104 struct mgmt_cp_remove_advertising *cp = cmd->param;
9105 int err;
9106
9107 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9108 if (err)
9109 return err;
9110
9111 if (list_empty(&hdev->adv_instances))
9112 err = hci_disable_advertising_sync(hdev);
9113
9114 return err;
9115 }
9116
9117 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9118 void *data, u16 data_len)
9119 {
9120 struct mgmt_cp_remove_advertising *cp = data;
9121 struct mgmt_pending_cmd *cmd;
9122 int err;
9123
9124 bt_dev_dbg(hdev, "sock %p", sk);
9125
9126 hci_dev_lock(hdev);
9127
9128 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9129 err = mgmt_cmd_status(sk, hdev->id,
9130 MGMT_OP_REMOVE_ADVERTISING,
9131 MGMT_STATUS_INVALID_PARAMS);
9132 goto unlock;
9133 }
9134
9135 if (pending_find(MGMT_OP_SET_LE, hdev)) {
9136 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9137 MGMT_STATUS_BUSY);
9138 goto unlock;
9139 }
9140
9141 if (list_empty(&hdev->adv_instances)) {
9142 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9143 MGMT_STATUS_INVALID_PARAMS);
9144 goto unlock;
9145 }
9146
9147 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9148 data_len);
9149 if (!cmd) {
9150 err = -ENOMEM;
9151 goto unlock;
9152 }
9153
9154 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
9155 remove_advertising_complete);
9156 if (err < 0)
9157 mgmt_pending_free(cmd);
9158
9159 unlock:
9160 hci_dev_unlock(hdev);
9161
9162 return err;
9163 }
9164
9165 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9166 void *data, u16 data_len)
9167 {
9168 struct mgmt_cp_get_adv_size_info *cp = data;
9169 struct mgmt_rp_get_adv_size_info rp;
9170 u32 flags, supported_flags;
9171
9172 bt_dev_dbg(hdev, "sock %p", sk);
9173
9174 if (!lmp_le_capable(hdev))
9175 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9176 MGMT_STATUS_REJECTED);
9177
9178 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9179 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9180 MGMT_STATUS_INVALID_PARAMS);
9181
9182 flags = __le32_to_cpu(cp->flags);
9183
9184 /* The current implementation only supports a subset of the specified
9185 * flags.
9186 */
9187 supported_flags = get_supported_adv_flags(hdev);
9188 if (flags & ~supported_flags)
9189 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9190 MGMT_STATUS_INVALID_PARAMS);
9191
9192 rp.instance = cp->instance;
9193 rp.flags = cp->flags;
9194 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9195 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9196
9197 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9198 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9199 }
9200
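/* Handler table indexed by management opcode. Each entry provides the
 * handler, the expected parameter size (a minimum when HCI_MGMT_VAR_LEN
 * is set), and flags such as HCI_MGMT_NO_HDEV for commands that take no
 * controller index and HCI_MGMT_UNTRUSTED for commands allowed on
 * untrusted sockets.
 */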
9201 static const struct hci_mgmt_handler mgmt_handlers[] = {
9202 { NULL }, /* 0x0000 (no command) */
9203 { read_version, MGMT_READ_VERSION_SIZE,
9204 HCI_MGMT_NO_HDEV |
9205 HCI_MGMT_UNTRUSTED },
9206 { read_commands, MGMT_READ_COMMANDS_SIZE,
9207 HCI_MGMT_NO_HDEV |
9208 HCI_MGMT_UNTRUSTED },
9209 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
9210 HCI_MGMT_NO_HDEV |
9211 HCI_MGMT_UNTRUSTED },
9212 { read_controller_info, MGMT_READ_INFO_SIZE,
9213 HCI_MGMT_UNTRUSTED },
9214 { set_powered, MGMT_SETTING_SIZE },
9215 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
9216 { set_connectable, MGMT_SETTING_SIZE },
9217 { set_fast_connectable, MGMT_SETTING_SIZE },
9218 { set_bondable, MGMT_SETTING_SIZE },
9219 { set_link_security, MGMT_SETTING_SIZE },
9220 { set_ssp, MGMT_SETTING_SIZE },
9221 { set_hs, MGMT_SETTING_SIZE },
9222 { set_le, MGMT_SETTING_SIZE },
9223 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
9224 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
9225 { add_uuid, MGMT_ADD_UUID_SIZE },
9226 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
9227 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
9228 HCI_MGMT_VAR_LEN },
9229 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
9230 HCI_MGMT_VAR_LEN },
9231 { disconnect, MGMT_DISCONNECT_SIZE },
9232 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
9233 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
9234 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
9235 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
9236 { pair_device, MGMT_PAIR_DEVICE_SIZE },
9237 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
9238 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
9239 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
9240 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
9241 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
9242 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
9243 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
9244 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
9245 HCI_MGMT_VAR_LEN },
9246 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
9247 { start_discovery, MGMT_START_DISCOVERY_SIZE },
9248 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
9249 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
9250 { block_device, MGMT_BLOCK_DEVICE_SIZE },
9251 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
9252 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
9253 { set_advertising, MGMT_SETTING_SIZE },
9254 { set_bredr, MGMT_SETTING_SIZE },
9255 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
9256 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
9257 { set_secure_conn, MGMT_SETTING_SIZE },
9258 { set_debug_keys, MGMT_SETTING_SIZE },
9259 { set_privacy, MGMT_SET_PRIVACY_SIZE },
9260 { load_irks, MGMT_LOAD_IRKS_SIZE,
9261 HCI_MGMT_VAR_LEN },
9262 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
9263 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
9264 { add_device, MGMT_ADD_DEVICE_SIZE },
9265 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
9266 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
9267 HCI_MGMT_VAR_LEN },
9268 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
9269 HCI_MGMT_NO_HDEV |
9270 HCI_MGMT_UNTRUSTED },
9271 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
9272 HCI_MGMT_UNCONFIGURED |
9273 HCI_MGMT_UNTRUSTED },
9274 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
9275 HCI_MGMT_UNCONFIGURED },
9276 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
9277 HCI_MGMT_UNCONFIGURED },
9278 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
9279 HCI_MGMT_VAR_LEN },
9280 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
9281 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
9282 HCI_MGMT_NO_HDEV |
9283 HCI_MGMT_UNTRUSTED },
9284 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
9285 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
9286 HCI_MGMT_VAR_LEN },
9287 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
9288 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
9289 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
9290 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
9291 HCI_MGMT_UNTRUSTED },
9292 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
9293 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
9294 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
9295 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
9296 HCI_MGMT_VAR_LEN },
9297 { set_wideband_speech, MGMT_SETTING_SIZE },
9298 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
9299 HCI_MGMT_UNTRUSTED },
9300 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
9301 HCI_MGMT_UNTRUSTED |
9302 HCI_MGMT_HDEV_OPTIONAL },
9303 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
9304 HCI_MGMT_VAR_LEN |
9305 HCI_MGMT_HDEV_OPTIONAL },
9306 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
9307 HCI_MGMT_UNTRUSTED },
9308 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
9309 HCI_MGMT_VAR_LEN },
9310 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
9311 HCI_MGMT_UNTRUSTED },
9312 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
9313 HCI_MGMT_VAR_LEN },
9314 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
9315 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
9316 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
9317 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
9318 HCI_MGMT_VAR_LEN },
9319 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
9320 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
9321 HCI_MGMT_VAR_LEN },
9322 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
9323 HCI_MGMT_VAR_LEN },
9324 { add_adv_patterns_monitor_rssi,
9325 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
9326 HCI_MGMT_VAR_LEN },
9327 { set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
9328 HCI_MGMT_VAR_LEN },
9329 { mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
9330 { mesh_send, MGMT_MESH_SEND_SIZE,
9331 HCI_MGMT_VAR_LEN },
9332 { mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
9333 { mgmt_hci_cmd_sync, MGMT_HCI_CMD_SYNC_SIZE, HCI_MGMT_VAR_LEN },
9334 };
9335
9336 void mgmt_index_added(struct hci_dev *hdev)
9337 {
9338 struct mgmt_ev_ext_index ev;
9339
9340 if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
9341 return;
9342
9343 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9344 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
9345 HCI_MGMT_UNCONF_INDEX_EVENTS);
9346 ev.type = 0x01;
9347 } else {
9348 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9349 HCI_MGMT_INDEX_EVENTS);
9350 ev.type = 0x00;
9351 }
9352
9353 ev.bus = hdev->bus;
9354
9355 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9356 HCI_MGMT_EXT_INDEX_EVENTS);
9357 }
9358
9359 void mgmt_index_removed(struct hci_dev *hdev)
9360 {
9361 struct mgmt_ev_ext_index ev;
9362 struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };
9363
9364 if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
9365 return;
9366
9367 mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);
9368
9369 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9370 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
9371 HCI_MGMT_UNCONF_INDEX_EVENTS);
9372 ev.type = 0x01;
9373 } else {
9374 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9375 HCI_MGMT_INDEX_EVENTS);
9376 ev.type = 0x00;
9377 }
9378
9379 ev.bus = hdev->bus;
9380
9381 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9382 HCI_MGMT_EXT_INDEX_EVENTS);
9383
9384 /* Cancel any remaining timed work */
9385 if (!hci_dev_test_flag(hdev, HCI_MGMT))
9386 return;
9387 cancel_delayed_work_sync(&hdev->discov_off);
9388 cancel_delayed_work_sync(&hdev->service_cache);
9389 cancel_delayed_work_sync(&hdev->rpa_expired);
9390 }
9391
mgmt_power_on(struct hci_dev * hdev,int err)9392 void mgmt_power_on(struct hci_dev *hdev, int err)
9393 {
9394 struct cmd_lookup match = { NULL, hdev };
9395
9396 bt_dev_dbg(hdev, "err %d", err);
9397
9398 hci_dev_lock(hdev);
9399
9400 if (!err) {
9401 restart_le_actions(hdev);
9402 hci_update_passive_scan(hdev);
9403 }
9404
9405 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
9406 &match);
9407
9408 new_settings(hdev, match.sk);
9409
9410 if (match.sk)
9411 sock_put(match.sk);
9412
9413 hci_dev_unlock(hdev);
9414 }
9415
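/* Power-off counterpart of mgmt_power_on(). The leading underscores signal
 * that, unlike mgmt_power_on(), no locking is done here; the caller is
 * expected to already hold the hdev lock.
 */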
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
			     &match);

	/* If the power off is because of hdev unregistration, use the
	 * appropriate INVALID_INDEX status. Otherwise use NOT_POWERED.
	 * We cover both scenarios here since later in mgmt_index_removed()
	 * any hci_conn callbacks will have already been triggered,
	 * potentially causing misleading DISCONNECTED status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
	else
		match.mgmt_status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct mgmt_pending_cmd *cmd;
	u8 status;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_RESPONDER:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}

	return MGMT_LTK_UNAUTHENTICATED;
}

void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * the long term key to be stored. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * should the long term key be stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
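	/* Per the Core specification, a static random address has its two
	 * most significant bits set (0b11), which is what the 0xc0 mask
	 * test below checks for.
	 */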
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * the signature resolving key to be stored. Their addresses
	 * will change the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * should the signature resolving key be stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

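/* Emit MGMT_EV_DEVICE_CONNECTED exactly once per connection; the
 * HCI_CONN_MGMT_CONNECTED flag guards against duplicate events. The event
 * carries either the LE advertising data or, for BR/EDR, the remote name
 * and class of device encoded as EIR fields.
 */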
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* Allocate a buffer for the LE or BR/EDR advertising data */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) +
				     (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE,
						    name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class,
						    sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}

static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
}

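/* Return true if the controller is powering down, either because the
 * HCI_POWERING_DOWN flag is set or because a Set Powered (off) command
 * is still pending.
 */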
bool mgmt_powering_down(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
		return true;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, true,
			     unpair_device_rsp, hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
					 conn->dst_type, status, true);
		return;
	}

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

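/* Common completion helper for the four user confirm/passkey reply
 * commands below; it resolves the matching pending command with the
 * translated HCI status.
 */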
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

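/* Remember the first socket seen while iterating pending commands, taking
 * a reference that the caller must drop with sock_put().
 */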
static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, false, sk_lookup,
			     &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, false, sk_lookup,
			     &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, false, sk_lookup,
			     &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS,
				   NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is an HCI command related to changing the power
		 * state of the HCI dev, don't send any mgmt signals.
		 */
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
			return;

		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}

static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}

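/* EIR/advertising data is a sequence of length-prefixed fields:
 * [len][type][data...], where len counts the type byte plus the data.
 * As an illustrative example, the bytes { 0x03, 0x03, 0x0d, 0x18 } encode
 * a Complete List of 16-bit Service Class UUIDs containing 0x180d, which
 * is matched below after expansion against the Bluetooth base UUID.
 */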
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}

static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If an RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results
	 * with an RSSI smaller than the threshold will be dropped. If the
	 * quirk is set, let the result through for further processing, as
	 * we might need to restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	     (rssi < hdev->discovery.rssi &&
	      !hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated results with updated RSSI values.
	 */
	if (hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER)) {
		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}

void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.monitor_handle = cpu_to_le16(handle);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}

static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}

static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0, in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitors
	 *
	 * For cases 1 and 2, report all advertisements via
	 * MGMT_EV_DEVICE_FOUND and report ONLY one advertisement per device
	 * for the matched Monitor via MGMT_EV_ADV_MONITOR_DEVICE_FOUND.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}

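/* Forward an advertising report to mesh listeners, but only if it carries
 * at least one of the AD types configured in hdev->mesh_ad_types; an empty
 * filter (first entry zero) accepts every report.
 */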
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}

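/* Entry point for inquiry results and advertising reports. The report is
 * mirrored to mesh listeners when applicable, then run through the
 * discovery filters (RSSI threshold, service UUIDs, limited discoverable
 * bit) before being turned into a Device Found event.
 */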
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel-initiated discovery. For LE,
	 * the one exception is if we have pend_le_reports > 0, in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);

			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}

void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) +
			     (name ? eir_precalc_len(name_len) : 0));
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name,
					    name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}

void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	} else {
		memset(&ev.addr, 0, sizeof(ev.addr));
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}

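/* The management interface is exposed to userspace as the HCI control
 * channel; registering this channel wires the handler table above into
 * the HCI socket layer.
 */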
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};

int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}

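/* Called when a management socket is closed: abort any mesh transmissions
 * that the socket still has queued on any controller.
 */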
void mgmt_cleanup(struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, true);
		} while (mesh_tx);
	}

	read_unlock(&hci_dev_list_lock);
}