/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <linux/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
#include "aosp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	23

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};

static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};

#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

static u8 mgmt_errno_status(int err)
{
	switch (err) {
	case 0:
		return MGMT_STATUS_SUCCESS;
	case -EPERM:
		return MGMT_STATUS_REJECTED;
	case -EINVAL:
		return MGMT_STATUS_INVALID_PARAMS;
	case -EOPNOTSUPP:
		return MGMT_STATUS_NOT_SUPPORTED;
	case -EBUSY:
		return MGMT_STATUS_BUSY;
	case -ETIMEDOUT:
		return MGMT_STATUS_AUTH_FAILED;
	case -ENOMEM:
		return MGMT_STATUS_NO_RESOURCES;
	case -EISCONN:
		return MGMT_STATUS_ALREADY_CONNECTED;
	case -ENOTCONN:
		return MGMT_STATUS_DISCONNECTED;
	}

	return MGMT_STATUS_FAILED;
}

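/* Map an error into an MGMT status code: negative errnos are translated
 * via mgmt_errno_status(), non-negative values are treated as HCI status
 * codes and looked up in mgmt_status_table above.
 */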
static u8 mgmt_status(int err)
{
	if (err < 0)
		return mgmt_errno_status(err);

	if (err < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[err];

	return MGMT_STATUS_FAILED;
}

static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}

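/* Translate an MGMT address type into the HCI LE address type; anything
 * other than BDADDR_LE_PUBLIC is treated as a random address.
 */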
static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;
	else
		return ADDR_LE_DEV_RANDOM;
}

void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}

static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}

static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}

static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

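/* Report which configuration options are still missing: an external
 * configuration that has not been completed yet, or a public address
 * that is required by a quirk but not set.
 */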
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}

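/* Build the bitmask of PHYs the controller supports, based on the BR/EDR
 * packet-type and EDR slot capabilities plus the LE 2M and Coded PHY
 * feature bits.
 */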
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}

static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}

static u32 get_configurable_phys(struct hci_dev *hdev)
{
	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}

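/* Settings the controller could support in principle, derived from its
 * BR/EDR, LE and CIS capabilities rather than from its current state.
 */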
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev))
			settings |= MGMT_SETTING_SSP;

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}

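/* Settings that are active right now, derived from the hdev flags that
 * are currently set.
 */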
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means that if the static address is not configured, this
	 * flag will never be set. If the address is configured, then
	 * whether the address is actually in use decides if the flag is
	 * set or not.
	 *
	 * For single-mode LE-only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	return settings;
}

static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}

bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}

static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}

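/* Delayed work that flushes the service cache: once HCI_SERVICE_CACHE
 * is cleared, the EIR data and class of device are resynchronized.
 */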
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}

static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}

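/* Delayed work that marks the RPA as expired; if advertising is active,
 * re-enabling it causes a fresh RPA to be generated and programmed.
 */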
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data);

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, just make sure the
	 * limited discoverable flag is cleared. Even for a timeout
	 * triggered from general discoverable, it is safe to clear
	 * the flag unconditionally.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);

static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}

static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}

static int mesh_send_sync(struct hci_dev *hdev, void *data);
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}

static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}

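/* One-time setup when a controller comes under mgmt control: register
 * the delayed works used by mgmt and clear the bondable flag so that
 * user-space has to enable bonding explicitly.
 */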
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them; for
	 * mgmt, however, we require user-space to enable
	 * it explicitly.
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}

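/* Append class of device, appearance and the complete/short local name
 * to an EIR buffer, returning the number of bytes written.
 */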
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}

static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}

static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}

void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

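/* Cancel a pending advertising-instance timeout, if one is armed. */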
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for the AUTO_OFF case where the device might
		 * not "really" have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}

static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}

static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}

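/* Handler for MGMT_OP_SET_POWERED: validates the request, rejects it
 * while another power change is already pending, and otherwise queues
 * set_powered_sync() with mgmt_set_powered_complete() as the callback.
 */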
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!cp->val) {
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
					      MGMT_STATUS_BUSY);
			goto failed;
		}
	}

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}

static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}

static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	/* dequeue cmd_sync entries using cmd as data as that is about to be
	 * removed/freed.
	 */
	hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);

	if (cmd->cmd_complete) {
		cmd->cmd_complete(cmd, match->mgmt_status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}

static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}

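/* Check whether BR/EDR operations are possible: not supported without
 * BR/EDR capability, rejected while BR/EDR is disabled.
 */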
mgmt_bredr_support(struct hci_dev * hdev)1485 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1486 {
1487 if (!lmp_bredr_capable(hdev))
1488 return MGMT_STATUS_NOT_SUPPORTED;
1489 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1490 return MGMT_STATUS_REJECTED;
1491 else
1492 return MGMT_STATUS_SUCCESS;
1493 }
1494
mgmt_le_support(struct hci_dev * hdev)1495 static u8 mgmt_le_support(struct hci_dev *hdev)
1496 {
1497 if (!lmp_le_capable(hdev))
1498 return MGMT_STATUS_NOT_SUPPORTED;
1499 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1500 return MGMT_STATUS_REJECTED;
1501 else
1502 return MGMT_STATUS_SUCCESS;
1503 }
1504
mgmt_set_discoverable_complete(struct hci_dev * hdev,void * data,int err)1505 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1506 int err)
1507 {
1508 struct mgmt_pending_cmd *cmd = data;
1509
1510 bt_dev_dbg(hdev, "err %d", err);
1511
1512 /* Make sure cmd still outstanding. */
1513 if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1514 return;
1515
1516 hci_dev_lock(hdev);
1517
1518 if (err) {
1519 u8 mgmt_err = mgmt_status(err);
1520 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1521 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1522 goto done;
1523 }
1524
1525 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1526 hdev->discov_timeout > 0) {
1527 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1528 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1529 }
1530
1531 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1532 new_settings(hdev, cmd->sk);
1533
1534 done:
1535 mgmt_pending_remove(cmd);
1536 hci_dev_unlock(hdev);
1537 }
1538
set_discoverable_sync(struct hci_dev * hdev,void * data)1539 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1540 {
1541 BT_DBG("%s", hdev->name);
1542
1543 return hci_update_discoverable_sync(hdev);
1544 }
1545
set_discoverable(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)1546 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1547 u16 len)
1548 {
1549 struct mgmt_cp_set_discoverable *cp = data;
1550 struct mgmt_pending_cmd *cmd;
1551 u16 timeout;
1552 int err;
1553
1554 bt_dev_dbg(hdev, "sock %p", sk);
1555
1556 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1557 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1558 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1559 MGMT_STATUS_REJECTED);
1560
1561 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1562 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1563 MGMT_STATUS_INVALID_PARAMS);
1564
1565 timeout = __le16_to_cpu(cp->timeout);
1566
1567 /* Disabling discoverable requires that no timeout is set,
1568 * and enabling limited discoverable requires a timeout.
1569 */
1570 if ((cp->val == 0x00 && timeout > 0) ||
1571 (cp->val == 0x02 && timeout == 0))
1572 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1573 MGMT_STATUS_INVALID_PARAMS);
1574
1575 hci_dev_lock(hdev);
1576
1577 if (!hdev_is_powered(hdev) && timeout > 0) {
1578 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1579 MGMT_STATUS_NOT_POWERED);
1580 goto failed;
1581 }
1582
1583 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1584 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1585 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1586 MGMT_STATUS_BUSY);
1587 goto failed;
1588 }
1589
1590 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1591 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1592 MGMT_STATUS_REJECTED);
1593 goto failed;
1594 }
1595
1596 if (hdev->advertising_paused) {
1597 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1598 MGMT_STATUS_BUSY);
1599 goto failed;
1600 }
1601
1602 if (!hdev_is_powered(hdev)) {
1603 bool changed = false;
1604
1605 /* Setting limited discoverable when powered off is
1606 * not a valid operation since it requires a timeout
1607 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1608 */
1609 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1610 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1611 changed = true;
1612 }
1613
1614 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1615 if (err < 0)
1616 goto failed;
1617
1618 if (changed)
1619 err = new_settings(hdev, sk);
1620
1621 goto failed;
1622 }
1623
1624 /* If the current mode is the same, then just update the timeout
1625 * value with the new value. And if only the timeout gets updated,
1626 * then no need for any HCI transactions.
1627 */
1628 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1629 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1630 HCI_LIMITED_DISCOVERABLE)) {
1631 cancel_delayed_work(&hdev->discov_off);
1632 hdev->discov_timeout = timeout;
1633
1634 if (cp->val && hdev->discov_timeout > 0) {
1635 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1636 queue_delayed_work(hdev->req_workqueue,
1637 &hdev->discov_off, to);
1638 }
1639
1640 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1641 goto failed;
1642 }
1643
1644 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1645 if (!cmd) {
1646 err = -ENOMEM;
1647 goto failed;
1648 }
1649
1650 /* Cancel any potential discoverable timeout that might be
1651 * still active and store new timeout value. The arming of
1652 * the timeout happens in the complete handler.
1653 */
1654 cancel_delayed_work(&hdev->discov_off);
1655 hdev->discov_timeout = timeout;
1656
1657 if (cp->val)
1658 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1659 else
1660 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1661
1662 /* Limited discoverable mode */
1663 if (cp->val == 0x02)
1664 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1665 else
1666 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1667
1668 err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1669 mgmt_set_discoverable_complete);
1670
1671 if (err < 0)
1672 mgmt_pending_remove(cmd);
1673
1674 failed:
1675 hci_dev_unlock(hdev);
1676 return err;
1677 }
1678
mgmt_set_connectable_complete(struct hci_dev * hdev,void * data,int err)1679 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1680 int err)
1681 {
1682 struct mgmt_pending_cmd *cmd = data;
1683
1684 bt_dev_dbg(hdev, "err %d", err);
1685
1686 /* Make sure cmd still outstanding. */
1687 if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1688 return;
1689
1690 hci_dev_lock(hdev);
1691
1692 if (err) {
1693 u8 mgmt_err = mgmt_status(err);
1694 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1695 goto done;
1696 }
1697
1698 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1699 new_settings(hdev, cmd->sk);
1700
1701 done:
1702 mgmt_pending_remove(cmd);
1703
1704 hci_dev_unlock(hdev);
1705 }
1706
set_connectable_update_settings(struct hci_dev * hdev,struct sock * sk,u8 val)1707 static int set_connectable_update_settings(struct hci_dev *hdev,
1708 struct sock *sk, u8 val)
1709 {
1710 bool changed = false;
1711 int err;
1712
1713 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1714 changed = true;
1715
1716 if (val) {
1717 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1718 } else {
1719 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1720 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1721 }
1722
1723 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1724 if (err < 0)
1725 return err;
1726
1727 if (changed) {
1728 hci_update_scan(hdev);
1729 hci_update_passive_scan(hdev);
1730 return new_settings(hdev, sk);
1731 }
1732
1733 return 0;
1734 }
1735
set_connectable_sync(struct hci_dev * hdev,void * data)1736 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1737 {
1738 BT_DBG("%s", hdev->name);
1739
1740 return hci_update_connectable_sync(hdev);
1741 }
1742
set_connectable(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)1743 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1744 u16 len)
1745 {
1746 struct mgmt_mode *cp = data;
1747 struct mgmt_pending_cmd *cmd;
1748 int err;
1749
1750 bt_dev_dbg(hdev, "sock %p", sk);
1751
1752 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1753 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1754 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1755 MGMT_STATUS_REJECTED);
1756
1757 if (cp->val != 0x00 && cp->val != 0x01)
1758 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1759 MGMT_STATUS_INVALID_PARAMS);
1760
1761 hci_dev_lock(hdev);
1762
1763 if (!hdev_is_powered(hdev)) {
1764 err = set_connectable_update_settings(hdev, sk, cp->val);
1765 goto failed;
1766 }
1767
1768 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1769 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1770 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1771 MGMT_STATUS_BUSY);
1772 goto failed;
1773 }
1774
1775 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1776 if (!cmd) {
1777 err = -ENOMEM;
1778 goto failed;
1779 }
1780
1781 if (cp->val) {
1782 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1783 } else {
1784 if (hdev->discov_timeout > 0)
1785 cancel_delayed_work(&hdev->discov_off);
1786
1787 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1788 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1789 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1790 }
1791
1792 err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1793 mgmt_set_connectable_complete);
1794
1795 if (err < 0)
1796 mgmt_pending_remove(cmd);
1797
1798 failed:
1799 hci_dev_unlock(hdev);
1800 return err;
1801 }
1802
set_bondable(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)1803 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1804 u16 len)
1805 {
1806 struct mgmt_mode *cp = data;
1807 bool changed;
1808 int err;
1809
1810 bt_dev_dbg(hdev, "sock %p", sk);
1811
1812 if (cp->val != 0x00 && cp->val != 0x01)
1813 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1814 MGMT_STATUS_INVALID_PARAMS);
1815
1816 hci_dev_lock(hdev);
1817
1818 if (cp->val)
1819 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1820 else
1821 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1822
1823 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1824 if (err < 0)
1825 goto unlock;
1826
1827 if (changed) {
1828 /* In limited privacy mode the change of bondable mode
1829 * may affect the local advertising address.
1830 */
1831 hci_update_discoverable(hdev);
1832
1833 err = new_settings(hdev, sk);
1834 }
1835
1836 unlock:
1837 hci_dev_unlock(hdev);
1838 return err;
1839 }
1840
1841 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1842 u16 len)
1843 {
1844 struct mgmt_mode *cp = data;
1845 struct mgmt_pending_cmd *cmd;
1846 u8 val, status;
1847 int err;
1848
1849 bt_dev_dbg(hdev, "sock %p", sk);
1850
1851 status = mgmt_bredr_support(hdev);
1852 if (status)
1853 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1854 status);
1855
1856 if (cp->val != 0x00 && cp->val != 0x01)
1857 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1858 MGMT_STATUS_INVALID_PARAMS);
1859
1860 hci_dev_lock(hdev);
1861
1862 if (!hdev_is_powered(hdev)) {
1863 bool changed = false;
1864
1865 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1866 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1867 changed = true;
1868 }
1869
1870 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1871 if (err < 0)
1872 goto failed;
1873
1874 if (changed)
1875 err = new_settings(hdev, sk);
1876
1877 goto failed;
1878 }
1879
1880 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1881 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1882 MGMT_STATUS_BUSY);
1883 goto failed;
1884 }
1885
1886 val = !!cp->val;
1887
1888 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1889 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1890 goto failed;
1891 }
1892
1893 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1894 if (!cmd) {
1895 err = -ENOMEM;
1896 goto failed;
1897 }
1898
1899 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1900 if (err < 0) {
1901 mgmt_pending_remove(cmd);
1902 goto failed;
1903 }
1904
1905 failed:
1906 hci_dev_unlock(hdev);
1907 return err;
1908 }
1909
1910 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1911 {
1912 struct cmd_lookup match = { NULL, hdev };
1913 struct mgmt_pending_cmd *cmd = data;
1914 struct mgmt_mode *cp = cmd->param;
1915 u8 enable = cp->val;
1916 bool changed;
1917
1918 /* Make sure cmd still outstanding. */
1919 if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1920 return;
1921
1922 if (err) {
1923 u8 mgmt_err = mgmt_status(err);
1924
1925 if (enable && hci_dev_test_and_clear_flag(hdev,
1926 HCI_SSP_ENABLED)) {
1927 new_settings(hdev, NULL);
1928 }
1929
1930 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1931 &mgmt_err);
1932 return;
1933 }
1934
1935 if (enable) {
1936 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1937 } else {
1938 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1939 }
1940
1941 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1942
1943 if (changed)
1944 new_settings(hdev, match.sk);
1945
1946 if (match.sk)
1947 sock_put(match.sk);
1948
1949 hci_update_eir_sync(hdev);
1950 }
1951
1952 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1953 {
1954 struct mgmt_pending_cmd *cmd = data;
1955 struct mgmt_mode *cp = cmd->param;
1956 bool changed = false;
1957 int err;
1958
1959 if (cp->val)
1960 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1961
1962 err = hci_write_ssp_mode_sync(hdev, cp->val);
1963
1964 if (!err && changed)
1965 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1966
1967 return err;
1968 }
1969
1970 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1971 {
1972 struct mgmt_mode *cp = data;
1973 struct mgmt_pending_cmd *cmd;
1974 u8 status;
1975 int err;
1976
1977 bt_dev_dbg(hdev, "sock %p", sk);
1978
1979 status = mgmt_bredr_support(hdev);
1980 if (status)
1981 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1982
1983 if (!lmp_ssp_capable(hdev))
1984 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1985 MGMT_STATUS_NOT_SUPPORTED);
1986
1987 if (cp->val != 0x00 && cp->val != 0x01)
1988 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1989 MGMT_STATUS_INVALID_PARAMS);
1990
1991 hci_dev_lock(hdev);
1992
1993 if (!hdev_is_powered(hdev)) {
1994 bool changed;
1995
1996 if (cp->val) {
1997 changed = !hci_dev_test_and_set_flag(hdev,
1998 HCI_SSP_ENABLED);
1999 } else {
2000 changed = hci_dev_test_and_clear_flag(hdev,
2001 HCI_SSP_ENABLED);
2002 }
2003
2004 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2005 if (err < 0)
2006 goto failed;
2007
2008 if (changed)
2009 err = new_settings(hdev, sk);
2010
2011 goto failed;
2012 }
2013
2014 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2015 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2016 MGMT_STATUS_BUSY);
2017 goto failed;
2018 }
2019
2020 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2021 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2022 goto failed;
2023 }
2024
2025 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2026 if (!cmd)
2027 err = -ENOMEM;
2028 else
2029 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2030 set_ssp_complete);
2031
2032 if (err < 0) {
2033 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2034 MGMT_STATUS_FAILED);
2035
2036 if (cmd)
2037 mgmt_pending_remove(cmd);
2038 }
2039
2040 failed:
2041 hci_dev_unlock(hdev);
2042 return err;
2043 }
2044
2045 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2046 {
2047 bt_dev_dbg(hdev, "sock %p", sk);
2048
2049 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2050 MGMT_STATUS_NOT_SUPPORTED);
2051 }
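/* Editorial note (assumption based on the handler above): MGMT_OP_SET_HS
 * is kept only for command-table compatibility; with BR/EDR High Speed
 * (AMP) support no longer available, the handler unconditionally
 * returns MGMT_STATUS_NOT_SUPPORTED.
 */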
2052
2053 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2054 {
2055 struct cmd_lookup match = { NULL, hdev };
2056 u8 status = mgmt_status(err);
2057
2058 bt_dev_dbg(hdev, "err %d", err);
2059
2060 if (status) {
2061 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2062 &status);
2063 return;
2064 }
2065
2066 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2067
2068 new_settings(hdev, match.sk);
2069
2070 if (match.sk)
2071 sock_put(match.sk);
2072 }
2073
2074 static int set_le_sync(struct hci_dev *hdev, void *data)
2075 {
2076 struct mgmt_pending_cmd *cmd = data;
2077 struct mgmt_mode *cp = cmd->param;
2078 u8 val = !!cp->val;
2079 int err;
2080
2081 if (!val) {
2082 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2083
2084 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2085 hci_disable_advertising_sync(hdev);
2086
2087 if (ext_adv_capable(hdev))
2088 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2089 } else {
2090 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2091 }
2092
2093 err = hci_write_le_host_supported_sync(hdev, val, 0);
2094
2095 /* Make sure the controller has a good default for
2096 * advertising data. Restrict the update to when LE
2097 * has actually been enabled. During power on, the
2098 * update in powered_update_hci will take care of it.
2099 */
2100 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2101 if (ext_adv_capable(hdev)) {
2102 int status;
2103
2104 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2105 if (!status)
2106 hci_update_scan_rsp_data_sync(hdev, 0x00);
2107 } else {
2108 hci_update_adv_data_sync(hdev, 0x00);
2109 hci_update_scan_rsp_data_sync(hdev, 0x00);
2110 }
2111
2112 hci_update_passive_scan(hdev);
2113 }
2114
2115 return err;
2116 }
2117
2118 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2119 {
2120 struct mgmt_pending_cmd *cmd = data;
2121 u8 status = mgmt_status(err);
2122 struct sock *sk = cmd->sk;
2123
2124 if (status) {
2125 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2126 cmd_status_rsp, &status);
2127 return;
2128 }
2129
2130 mgmt_pending_remove(cmd);
2131 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2132 }
2133
2134 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2135 {
2136 struct mgmt_pending_cmd *cmd = data;
2137 struct mgmt_cp_set_mesh *cp = cmd->param;
2138 size_t len = cmd->param_len;
2139
2140 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2141
2142 if (cp->enable)
2143 hci_dev_set_flag(hdev, HCI_MESH);
2144 else
2145 hci_dev_clear_flag(hdev, HCI_MESH);
2146
2147 len -= sizeof(*cp);
2148
2149 /* If filters don't fit, forward all adv pkts */
2150 if (len <= sizeof(hdev->mesh_ad_types))
2151 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2152
2153 hci_update_passive_scan_sync(hdev);
2154 return 0;
2155 }
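/* Editorial note: cmd->param_len minus the fixed header is the length
 * of the variable ad_types filter list. When that list is too long to
 * copy, mesh_ad_types stays zeroed, which per the comment above means
 * no filtering, i.e. every advertisement is forwarded to the mesh
 * receiver.
 */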
2156
2157 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2158 {
2159 struct mgmt_cp_set_mesh *cp = data;
2160 struct mgmt_pending_cmd *cmd;
2161 int err = 0;
2162
2163 bt_dev_dbg(hdev, "sock %p", sk);
2164
2165 if (!lmp_le_capable(hdev) ||
2166 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2167 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2168 MGMT_STATUS_NOT_SUPPORTED);
2169
2170 if (cp->enable != 0x00 && cp->enable != 0x01)
2171 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2172 MGMT_STATUS_INVALID_PARAMS);
2173
2174 hci_dev_lock(hdev);
2175
2176 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2177 if (!cmd)
2178 err = -ENOMEM;
2179 else
2180 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2181 set_mesh_complete);
2182
2183 if (err < 0) {
2184 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2185 MGMT_STATUS_FAILED);
2186
2187 if (cmd)
2188 mgmt_pending_remove(cmd);
2189 }
2190
2191 hci_dev_unlock(hdev);
2192 return err;
2193 }
2194
2195 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2196 {
2197 struct mgmt_mesh_tx *mesh_tx = data;
2198 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2199 unsigned long mesh_send_interval;
2200 u8 mgmt_err = mgmt_status(err);
2201
2202 /* Report any errors here, but don't report completion */
2203
2204 if (mgmt_err) {
2205 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2206 /* Send Complete Error Code for handle */
2207 mesh_send_complete(hdev, mesh_tx, false);
2208 return;
2209 }
2210
2211 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2212 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2213 mesh_send_interval);
2214 }
2215
2216 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2217 {
2218 struct mgmt_mesh_tx *mesh_tx = data;
2219 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2220 struct adv_info *adv, *next_instance;
2221 u8 instance = hdev->le_num_of_adv_sets + 1;
2222 u16 timeout, duration;
2223 int err = 0;
2224
2225 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2226 return MGMT_STATUS_BUSY;
2227
2228 timeout = 1000;
2229 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2230 adv = hci_add_adv_instance(hdev, instance, 0,
2231 send->adv_data_len, send->adv_data,
2232 0, NULL,
2233 timeout, duration,
2234 HCI_ADV_TX_POWER_NO_PREFERENCE,
2235 hdev->le_adv_min_interval,
2236 hdev->le_adv_max_interval,
2237 mesh_tx->handle);
2238
2239 if (!IS_ERR(adv))
2240 mesh_tx->instance = instance;
2241 else
2242 err = PTR_ERR(adv);
2243
2244 if (hdev->cur_adv_instance == instance) {
2245 /* If the currently advertised instance is being changed then
2246 * cancel the current advertising and schedule the next
2247 * instance. If there is only one instance then the overridden
2248 * advertising data will be visible right away.
2249 */
2250 cancel_adv_timeout(hdev);
2251
2252 next_instance = hci_get_next_instance(hdev, instance);
2253 if (next_instance)
2254 instance = next_instance->instance;
2255 else
2256 instance = 0;
2257 } else if (hdev->adv_instance_timeout) {
2258 /* Immediately advertise the new instance if there is no other,
2259 * or let it start naturally from the queue if advertising is
2260 * already happening. */
2261 instance = 0;
2262 }
2263
2264 if (instance)
2265 return hci_schedule_adv_instance_sync(hdev, instance, true);
2266
2267 return err;
2268 }
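/* Worked example (editorial, assuming the default le_adv_max_interval
 * of 0x0800, i.e. 1.28 s): with send->cnt == 3 the instance duration
 * above is 3 * INTERVAL_TO_MS(0x0800) = 3 * 1280 ms, one maximum
 * advertising interval per repetition of the mesh packet.
 */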
2269
2270 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2271 {
2272 struct mgmt_rp_mesh_read_features *rp = data;
2273
2274 if (rp->used_handles >= rp->max_handles)
2275 return;
2276
2277 rp->handles[rp->used_handles++] = mesh_tx->handle;
2278 }
2279
2280 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2281 void *data, u16 len)
2282 {
2283 struct mgmt_rp_mesh_read_features rp;
2284
2285 if (!lmp_le_capable(hdev) ||
2286 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2287 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2288 MGMT_STATUS_NOT_SUPPORTED);
2289
2290 memset(&rp, 0, sizeof(rp));
2291 rp.index = cpu_to_le16(hdev->id);
2292 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2293 rp.max_handles = MESH_HANDLES_MAX;
2294
2295 hci_dev_lock(hdev);
2296
2297 if (rp.max_handles)
2298 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2299
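/* Editorial note: sizeof(rp) counts the full MESH_HANDLES_MAX handle
 * array, so subtracting the unused tail below trims the reply to the
 * header plus the used_handles bytes actually filled in by
 * send_count().
 */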
2300 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2301 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2302
2303 hci_dev_unlock(hdev);
2304 return 0;
2305 }
2306
2307 static int send_cancel(struct hci_dev *hdev, void *data)
2308 {
2309 struct mgmt_pending_cmd *cmd = data;
2310 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2311 struct mgmt_mesh_tx *mesh_tx;
2312
2313 if (!cancel->handle) {
2314 do {
2315 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2316
2317 if (mesh_tx)
2318 mesh_send_complete(hdev, mesh_tx, false);
2319 } while (mesh_tx);
2320 } else {
2321 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2322
2323 if (mesh_tx && mesh_tx->sk == cmd->sk)
2324 mesh_send_complete(hdev, mesh_tx, false);
2325 }
2326
2327 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2328 0, NULL, 0);
2329 mgmt_pending_free(cmd);
2330
2331 return 0;
2332 }
2333
2334 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2335 void *data, u16 len)
2336 {
2337 struct mgmt_pending_cmd *cmd;
2338 int err;
2339
2340 if (!lmp_le_capable(hdev) ||
2341 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2342 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2343 MGMT_STATUS_NOT_SUPPORTED);
2344
2345 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2346 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2347 MGMT_STATUS_REJECTED);
2348
2349 hci_dev_lock(hdev);
2350 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2351 if (!cmd)
2352 err = -ENOMEM;
2353 else
2354 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2355
2356 if (err < 0) {
2357 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2358 MGMT_STATUS_FAILED);
2359
2360 if (cmd)
2361 mgmt_pending_free(cmd);
2362 }
2363
2364 hci_dev_unlock(hdev);
2365 return err;
2366 }
2367
2368 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2369 {
2370 struct mgmt_mesh_tx *mesh_tx;
2371 struct mgmt_cp_mesh_send *send = data;
2372 struct mgmt_rp_mesh_read_features rp;
2373 bool sending;
2374 int err = 0;
2375
2376 if (!lmp_le_capable(hdev) ||
2377 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2378 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2379 MGMT_STATUS_NOT_SUPPORTED);
2380 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2381 len <= MGMT_MESH_SEND_SIZE ||
2382 len > (MGMT_MESH_SEND_SIZE + 31))
2383 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2384 MGMT_STATUS_REJECTED);
2385
2386 hci_dev_lock(hdev);
2387
2388 memset(&rp, 0, sizeof(rp));
2389 rp.max_handles = MESH_HANDLES_MAX;
2390
2391 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2392
2393 if (rp.max_handles <= rp.used_handles) {
2394 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2395 MGMT_STATUS_BUSY);
2396 goto done;
2397 }
2398
2399 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2400 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2401
2402 if (!mesh_tx)
2403 err = -ENOMEM;
2404 else if (!sending)
2405 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2406 mesh_send_start_complete);
2407
2408 if (err < 0) {
2409 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2410 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2411 MGMT_STATUS_FAILED);
2412
2413 if (mesh_tx) {
2414 if (sending)
2415 mgmt_mesh_remove(mesh_tx);
2416 }
2417 } else {
2418 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2419
2420 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2421 &mesh_tx->handle, 1);
2422 }
2423
2424 done:
2425 hci_dev_unlock(hdev);
2426 return err;
2427 }
2428
2429 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2430 {
2431 struct mgmt_mode *cp = data;
2432 struct mgmt_pending_cmd *cmd;
2433 int err;
2434 u8 val, enabled;
2435
2436 bt_dev_dbg(hdev, "sock %p", sk);
2437
2438 if (!lmp_le_capable(hdev))
2439 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2440 MGMT_STATUS_NOT_SUPPORTED);
2441
2442 if (cp->val != 0x00 && cp->val != 0x01)
2443 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2444 MGMT_STATUS_INVALID_PARAMS);
2445
2446 /* Bluetooth single-mode LE-only controllers, or dual-mode
2447 * controllers configured as LE-only devices, do not allow
2448 * switching LE off: they either have LE enabled explicitly
2449 * or have had BR/EDR switched off previously.
2450 *
2451 * Trying to enable LE when it is already enabled gracefully
2452 * sends a positive response, while trying to disable it
2453 * results in rejection.
2454 */
2455 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2456 if (cp->val == 0x01)
2457 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2458
2459 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2460 MGMT_STATUS_REJECTED);
2461 }
2462
2463 hci_dev_lock(hdev);
2464
2465 val = !!cp->val;
2466 enabled = lmp_host_le_capable(hdev);
2467
2468 if (!hdev_is_powered(hdev) || val == enabled) {
2469 bool changed = false;
2470
2471 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2472 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2473 changed = true;
2474 }
2475
2476 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2477 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2478 changed = true;
2479 }
2480
2481 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2482 if (err < 0)
2483 goto unlock;
2484
2485 if (changed)
2486 err = new_settings(hdev, sk);
2487
2488 goto unlock;
2489 }
2490
2491 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2492 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2493 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2494 MGMT_STATUS_BUSY);
2495 goto unlock;
2496 }
2497
2498 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2499 if (!cmd)
2500 err = -ENOMEM;
2501 else
2502 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2503 set_le_complete);
2504
2505 if (err < 0) {
2506 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2507 MGMT_STATUS_FAILED);
2508
2509 if (cmd)
2510 mgmt_pending_remove(cmd);
2511 }
2512
2513 unlock:
2514 hci_dev_unlock(hdev);
2515 return err;
2516 }
2517
2518 /* This is a helper function to test for pending mgmt commands that can
2519 * cause CoD or EIR HCI commands. We can only allow one such pending
2520 * mgmt command at a time since otherwise we cannot easily track what
2521 * the current values are, will be, and based on that calculate if a new
2522 * HCI command needs to be sent and if yes with what value.
2523 */
2524 static bool pending_eir_or_class(struct hci_dev *hdev)
2525 {
2526 struct mgmt_pending_cmd *cmd;
2527
2528 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2529 switch (cmd->opcode) {
2530 case MGMT_OP_ADD_UUID:
2531 case MGMT_OP_REMOVE_UUID:
2532 case MGMT_OP_SET_DEV_CLASS:
2533 case MGMT_OP_SET_POWERED:
2534 return true;
2535 }
2536 }
2537
2538 return false;
2539 }
2540
2541 static const u8 bluetooth_base_uuid[] = {
2542 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2543 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2544 };
2545
2546 static u8 get_uuid_size(const u8 *uuid)
2547 {
2548 u32 val;
2549
2550 if (memcmp(uuid, bluetooth_base_uuid, 12))
2551 return 128;
2552
2553 val = get_unaligned_le32(&uuid[12]);
2554 if (val > 0xffff)
2555 return 32;
2556
2557 return 16;
2558 }
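/* Worked example (editorial): the 16-bit SIG UUID 0x110B expands to
 * 0000110b-0000-1000-8000-00805f9b34fb. Stored little-endian, bytes
 * 0-11 match bluetooth_base_uuid above and get_unaligned_le32() on
 * bytes 12-15 yields 0x0000110b <= 0xffff, so get_uuid_size() returns
 * 16. A value above 0xffff there would make it a 32-bit UUID.
 */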
2559
2560 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2561 {
2562 struct mgmt_pending_cmd *cmd = data;
2563
2564 bt_dev_dbg(hdev, "err %d", err);
2565
2566 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2567 mgmt_status(err), hdev->dev_class, 3);
2568
2569 mgmt_pending_free(cmd);
2570 }
2571
2572 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2573 {
2574 int err;
2575
2576 err = hci_update_class_sync(hdev);
2577 if (err)
2578 return err;
2579
2580 return hci_update_eir_sync(hdev);
2581 }
2582
2583 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2584 {
2585 struct mgmt_cp_add_uuid *cp = data;
2586 struct mgmt_pending_cmd *cmd;
2587 struct bt_uuid *uuid;
2588 int err;
2589
2590 bt_dev_dbg(hdev, "sock %p", sk);
2591
2592 hci_dev_lock(hdev);
2593
2594 if (pending_eir_or_class(hdev)) {
2595 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2596 MGMT_STATUS_BUSY);
2597 goto failed;
2598 }
2599
2600 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2601 if (!uuid) {
2602 err = -ENOMEM;
2603 goto failed;
2604 }
2605
2606 memcpy(uuid->uuid, cp->uuid, 16);
2607 uuid->svc_hint = cp->svc_hint;
2608 uuid->size = get_uuid_size(cp->uuid);
2609
2610 list_add_tail(&uuid->list, &hdev->uuids);
2611
2612 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2613 if (!cmd) {
2614 err = -ENOMEM;
2615 goto failed;
2616 }
2617
2618 /* MGMT_OP_ADD_UUID doesn't require the adapter to be up/running,
2619 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2620 */
2621 err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
2622 mgmt_class_complete);
2623 if (err < 0) {
2624 mgmt_pending_free(cmd);
2625 goto failed;
2626 }
2627
2628 failed:
2629 hci_dev_unlock(hdev);
2630 return err;
2631 }
2632
2633 static bool enable_service_cache(struct hci_dev *hdev)
2634 {
2635 if (!hdev_is_powered(hdev))
2636 return false;
2637
2638 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2639 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2640 CACHE_TIMEOUT);
2641 return true;
2642 }
2643
2644 return false;
2645 }
2646
2647 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2648 {
2649 int err;
2650
2651 err = hci_update_class_sync(hdev);
2652 if (err)
2653 return err;
2654
2655 return hci_update_eir_sync(hdev);
2656 }
2657
2658 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2659 u16 len)
2660 {
2661 struct mgmt_cp_remove_uuid *cp = data;
2662 struct mgmt_pending_cmd *cmd;
2663 struct bt_uuid *match, *tmp;
2664 static const u8 bt_uuid_any[] = {
2665 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2666 };
2667 int err, found;
2668
2669 bt_dev_dbg(hdev, "sock %p", sk);
2670
2671 hci_dev_lock(hdev);
2672
2673 if (pending_eir_or_class(hdev)) {
2674 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2675 MGMT_STATUS_BUSY);
2676 goto unlock;
2677 }
2678
2679 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2680 hci_uuids_clear(hdev);
2681
2682 if (enable_service_cache(hdev)) {
2683 err = mgmt_cmd_complete(sk, hdev->id,
2684 MGMT_OP_REMOVE_UUID,
2685 0, hdev->dev_class, 3);
2686 goto unlock;
2687 }
2688
2689 goto update_class;
2690 }
2691
2692 found = 0;
2693
2694 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2695 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2696 continue;
2697
2698 list_del(&match->list);
2699 kfree(match);
2700 found++;
2701 }
2702
2703 if (found == 0) {
2704 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2705 MGMT_STATUS_INVALID_PARAMS);
2706 goto unlock;
2707 }
2708
2709 update_class:
2710 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2711 if (!cmd) {
2712 err = -ENOMEM;
2713 goto unlock;
2714 }
2715
2716 /* MGMT_OP_REMOVE_UUID doesn't require the adapter to be up/running,
2717 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2718 */
2719 err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
2720 mgmt_class_complete);
2721 if (err < 0)
2722 mgmt_pending_free(cmd);
2723
2724 unlock:
2725 hci_dev_unlock(hdev);
2726 return err;
2727 }
2728
2729 static int set_class_sync(struct hci_dev *hdev, void *data)
2730 {
2731 int err = 0;
2732
2733 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2734 cancel_delayed_work_sync(&hdev->service_cache);
2735 err = hci_update_eir_sync(hdev);
2736 }
2737
2738 if (err)
2739 return err;
2740
2741 return hci_update_class_sync(hdev);
2742 }
2743
2744 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2745 u16 len)
2746 {
2747 struct mgmt_cp_set_dev_class *cp = data;
2748 struct mgmt_pending_cmd *cmd;
2749 int err;
2750
2751 bt_dev_dbg(hdev, "sock %p", sk);
2752
2753 if (!lmp_bredr_capable(hdev))
2754 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2755 MGMT_STATUS_NOT_SUPPORTED);
2756
2757 hci_dev_lock(hdev);
2758
2759 if (pending_eir_or_class(hdev)) {
2760 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2761 MGMT_STATUS_BUSY);
2762 goto unlock;
2763 }
2764
2765 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2766 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2767 MGMT_STATUS_INVALID_PARAMS);
2768 goto unlock;
2769 }
2770
2771 hdev->major_class = cp->major;
2772 hdev->minor_class = cp->minor;
2773
2774 if (!hdev_is_powered(hdev)) {
2775 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2776 hdev->dev_class, 3);
2777 goto unlock;
2778 }
2779
2780 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2781 if (!cmd) {
2782 err = -ENOMEM;
2783 goto unlock;
2784 }
2785
2786 /* MGMT_OP_SET_DEV_CLASS doesn't require the adapter to be up/running,
2787 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2788 */
2789 err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
2790 mgmt_class_complete);
2791 if (err < 0)
2792 mgmt_pending_free(cmd);
2793
2794 unlock:
2795 hci_dev_unlock(hdev);
2796 return err;
2797 }
2798
2799 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2800 u16 len)
2801 {
2802 struct mgmt_cp_load_link_keys *cp = data;
2803 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2804 sizeof(struct mgmt_link_key_info));
2805 u16 key_count, expected_len;
2806 bool changed;
2807 int i;
2808
2809 bt_dev_dbg(hdev, "sock %p", sk);
2810
2811 if (!lmp_bredr_capable(hdev))
2812 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2813 MGMT_STATUS_NOT_SUPPORTED);
2814
2815 key_count = __le16_to_cpu(cp->key_count);
2816 if (key_count > max_key_count) {
2817 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2818 key_count);
2819 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2820 MGMT_STATUS_INVALID_PARAMS);
2821 }
2822
2823 expected_len = struct_size(cp, keys, key_count);
2824 if (expected_len != len) {
2825 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2826 expected_len, len);
2827 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2828 MGMT_STATUS_INVALID_PARAMS);
2829 }
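/* Editorial example: with key_count == 2, struct_size() above
 * evaluates to sizeof(*cp) + 2 * sizeof(struct mgmt_link_key_info);
 * any other total length is rejected as MGMT_STATUS_INVALID_PARAMS.
 */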
2830
2831 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2832 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2833 MGMT_STATUS_INVALID_PARAMS);
2834
2835 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2836 key_count);
2837
2838 hci_dev_lock(hdev);
2839
2840 hci_link_keys_clear(hdev);
2841
2842 if (cp->debug_keys)
2843 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2844 else
2845 changed = hci_dev_test_and_clear_flag(hdev,
2846 HCI_KEEP_DEBUG_KEYS);
2847
2848 if (changed)
2849 new_settings(hdev, NULL);
2850
2851 for (i = 0; i < key_count; i++) {
2852 struct mgmt_link_key_info *key = &cp->keys[i];
2853
2854 if (hci_is_blocked_key(hdev,
2855 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2856 key->val)) {
2857 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2858 &key->addr.bdaddr);
2859 continue;
2860 }
2861
2862 if (key->addr.type != BDADDR_BREDR) {
2863 bt_dev_warn(hdev,
2864 "Invalid link address type %u for %pMR",
2865 key->addr.type, &key->addr.bdaddr);
2866 continue;
2867 }
2868
2869 if (key->type > 0x08) {
2870 bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
2871 key->type, &key->addr.bdaddr);
2872 continue;
2873 }
2874
2875 /* Always ignore debug keys and require a new pairing if
2876 * the user wants to use them.
2877 */
2878 if (key->type == HCI_LK_DEBUG_COMBINATION)
2879 continue;
2880
2881 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2882 key->type, key->pin_len, NULL);
2883 }
2884
2885 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2886
2887 hci_dev_unlock(hdev);
2888
2889 return 0;
2890 }
2891
2892 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2893 u8 addr_type, struct sock *skip_sk)
2894 {
2895 struct mgmt_ev_device_unpaired ev;
2896
2897 bacpy(&ev.addr.bdaddr, bdaddr);
2898 ev.addr.type = addr_type;
2899
2900 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2901 skip_sk);
2902 }
2903
2904 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2905 {
2906 struct mgmt_pending_cmd *cmd = data;
2907 struct mgmt_cp_unpair_device *cp = cmd->param;
2908
2909 if (!err)
2910 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2911
2912 cmd->cmd_complete(cmd, err);
2913 mgmt_pending_free(cmd);
2914 }
2915
2916 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2917 {
2918 struct mgmt_pending_cmd *cmd = data;
2919 struct mgmt_cp_unpair_device *cp = cmd->param;
2920 struct hci_conn *conn;
2921
2922 if (cp->addr.type == BDADDR_BREDR)
2923 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2924 &cp->addr.bdaddr);
2925 else
2926 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2927 le_addr_type(cp->addr.type));
2928
2929 if (!conn)
2930 return 0;
2931
2932 /* Disregard any possible error since the likes of hci_abort_conn_sync
2933 * will clean up the connection no matter the error.
2934 */
2935 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2936
2937 return 0;
2938 }
2939
2940 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2941 u16 len)
2942 {
2943 struct mgmt_cp_unpair_device *cp = data;
2944 struct mgmt_rp_unpair_device rp;
2945 struct hci_conn_params *params;
2946 struct mgmt_pending_cmd *cmd;
2947 struct hci_conn *conn;
2948 u8 addr_type;
2949 int err;
2950
2951 memset(&rp, 0, sizeof(rp));
2952 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2953 rp.addr.type = cp->addr.type;
2954
2955 if (!bdaddr_type_is_valid(cp->addr.type))
2956 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2957 MGMT_STATUS_INVALID_PARAMS,
2958 &rp, sizeof(rp));
2959
2960 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2961 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2962 MGMT_STATUS_INVALID_PARAMS,
2963 &rp, sizeof(rp));
2964
2965 hci_dev_lock(hdev);
2966
2967 if (!hdev_is_powered(hdev)) {
2968 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2969 MGMT_STATUS_NOT_POWERED, &rp,
2970 sizeof(rp));
2971 goto unlock;
2972 }
2973
2974 if (cp->addr.type == BDADDR_BREDR) {
2975 /* If disconnection is requested, then look up the
2976 * connection. If the remote device is connected, it
2977 * will be later used to terminate the link.
2978 *
2979 * Setting it to NULL explicitly will cause no
2980 * termination of the link.
2981 */
2982 if (cp->disconnect)
2983 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2984 &cp->addr.bdaddr);
2985 else
2986 conn = NULL;
2987
2988 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2989 if (err < 0) {
2990 err = mgmt_cmd_complete(sk, hdev->id,
2991 MGMT_OP_UNPAIR_DEVICE,
2992 MGMT_STATUS_NOT_PAIRED, &rp,
2993 sizeof(rp));
2994 goto unlock;
2995 }
2996
2997 goto done;
2998 }
2999
3000 /* LE address type */
3001 addr_type = le_addr_type(cp->addr.type);
3002
3003 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3004 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3005 if (err < 0) {
3006 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3007 MGMT_STATUS_NOT_PAIRED, &rp,
3008 sizeof(rp));
3009 goto unlock;
3010 }
3011
3012 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3013 if (!conn) {
3014 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3015 goto done;
3016 }
3017
3019 /* Defer clearing up the connection parameters until closing to
3020 * give a chance of keeping them if a re-pairing happens.
3021 */
3022 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3023
3024 /* Disable auto-connection parameters if present */
3025 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3026 if (params) {
3027 if (params->explicit_connect)
3028 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3029 else
3030 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3031 }
3032
3033 /* If disconnection is not requested, then clear the connection
3034 * variable so that the link is not terminated.
3035 */
3036 if (!cp->disconnect)
3037 conn = NULL;
3038
3039 done:
3040 /* If the connection variable is set, then termination of the
3041 * link is requested.
3042 */
3043 if (!conn) {
3044 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3045 &rp, sizeof(rp));
3046 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3047 goto unlock;
3048 }
3049
3050 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3051 sizeof(*cp));
3052 if (!cmd) {
3053 err = -ENOMEM;
3054 goto unlock;
3055 }
3056
3057 cmd->cmd_complete = addr_cmd_complete;
3058
3059 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3060 unpair_device_complete);
3061 if (err < 0)
3062 mgmt_pending_free(cmd);
3063
3064 unlock:
3065 hci_dev_unlock(hdev);
3066 return err;
3067 }
3068
3069 static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
3070 {
3071 struct mgmt_pending_cmd *cmd = data;
3072
3073 cmd->cmd_complete(cmd, mgmt_status(err));
3074 mgmt_pending_free(cmd);
3075 }
3076
3077 static int disconnect_sync(struct hci_dev *hdev, void *data)
3078 {
3079 struct mgmt_pending_cmd *cmd = data;
3080 struct mgmt_cp_disconnect *cp = cmd->param;
3081 struct hci_conn *conn;
3082
3083 if (cp->addr.type == BDADDR_BREDR)
3084 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3085 &cp->addr.bdaddr);
3086 else
3087 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3088 le_addr_type(cp->addr.type));
3089
3090 if (!conn)
3091 return -ENOTCONN;
3092
3093 /* Disregard any possible error since the likes of hci_abort_conn_sync
3094 * will clean up the connection no matter the error.
3095 */
3096 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3097
3098 return 0;
3099 }
3100
3101 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3102 u16 len)
3103 {
3104 struct mgmt_cp_disconnect *cp = data;
3105 struct mgmt_rp_disconnect rp;
3106 struct mgmt_pending_cmd *cmd;
3107 int err;
3108
3109 bt_dev_dbg(hdev, "sock %p", sk);
3110
3111 memset(&rp, 0, sizeof(rp));
3112 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3113 rp.addr.type = cp->addr.type;
3114
3115 if (!bdaddr_type_is_valid(cp->addr.type))
3116 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3117 MGMT_STATUS_INVALID_PARAMS,
3118 &rp, sizeof(rp));
3119
3120 hci_dev_lock(hdev);
3121
3122 if (!test_bit(HCI_UP, &hdev->flags)) {
3123 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3124 MGMT_STATUS_NOT_POWERED, &rp,
3125 sizeof(rp));
3126 goto failed;
3127 }
3128
3129 cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3130 if (!cmd) {
3131 err = -ENOMEM;
3132 goto failed;
3133 }
3134
3135 cmd->cmd_complete = generic_cmd_complete;
3136
3137 err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
3138 disconnect_complete);
3139 if (err < 0)
3140 mgmt_pending_free(cmd);
3141
3142 failed:
3143 hci_dev_unlock(hdev);
3144 return err;
3145 }
3146
3147 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3148 {
3149 switch (link_type) {
3150 case ISO_LINK:
3151 case LE_LINK:
3152 switch (addr_type) {
3153 case ADDR_LE_DEV_PUBLIC:
3154 return BDADDR_LE_PUBLIC;
3155
3156 default:
3157 /* Fallback to LE Random address type */
3158 return BDADDR_LE_RANDOM;
3159 }
3160
3161 default:
3162 /* Fallback to BR/EDR type */
3163 return BDADDR_BREDR;
3164 }
3165 }
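/* Editorial summary of the mapping above:
 *
 *	LE_LINK/ISO_LINK + ADDR_LE_DEV_PUBLIC -> BDADDR_LE_PUBLIC
 *	LE_LINK/ISO_LINK + anything else      -> BDADDR_LE_RANDOM
 *	any other link type                   -> BDADDR_BREDR
 */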
3166
3167 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3168 u16 data_len)
3169 {
3170 struct mgmt_rp_get_connections *rp;
3171 struct hci_conn *c;
3172 int err;
3173 u16 i;
3174
3175 bt_dev_dbg(hdev, "sock %p", sk);
3176
3177 hci_dev_lock(hdev);
3178
3179 if (!hdev_is_powered(hdev)) {
3180 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3181 MGMT_STATUS_NOT_POWERED);
3182 goto unlock;
3183 }
3184
3185 i = 0;
3186 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3187 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3188 i++;
3189 }
3190
3191 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3192 if (!rp) {
3193 err = -ENOMEM;
3194 goto unlock;
3195 }
3196
3197 i = 0;
3198 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3199 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3200 continue;
3201 bacpy(&rp->addr[i].bdaddr, &c->dst);
3202 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3203 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3204 continue;
3205 i++;
3206 }
3207
3208 rp->conn_count = cpu_to_le16(i);
3209
3210 /* Recalculate length in case of filtered SCO connections, etc */
3211 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3212 struct_size(rp, addr, i));
3213
3214 kfree(rp);
3215
3216 unlock:
3217 hci_dev_unlock(hdev);
3218 return err;
3219 }
3220
3221 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3222 struct mgmt_cp_pin_code_neg_reply *cp)
3223 {
3224 struct mgmt_pending_cmd *cmd;
3225 int err;
3226
3227 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3228 sizeof(*cp));
3229 if (!cmd)
3230 return -ENOMEM;
3231
3232 cmd->cmd_complete = addr_cmd_complete;
3233
3234 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3235 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3236 if (err < 0)
3237 mgmt_pending_remove(cmd);
3238
3239 return err;
3240 }
3241
3242 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3243 u16 len)
3244 {
3245 struct hci_conn *conn;
3246 struct mgmt_cp_pin_code_reply *cp = data;
3247 struct hci_cp_pin_code_reply reply;
3248 struct mgmt_pending_cmd *cmd;
3249 int err;
3250
3251 bt_dev_dbg(hdev, "sock %p", sk);
3252
3253 hci_dev_lock(hdev);
3254
3255 if (!hdev_is_powered(hdev)) {
3256 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3257 MGMT_STATUS_NOT_POWERED);
3258 goto failed;
3259 }
3260
3261 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3262 if (!conn) {
3263 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3264 MGMT_STATUS_NOT_CONNECTED);
3265 goto failed;
3266 }
3267
3268 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3269 struct mgmt_cp_pin_code_neg_reply ncp;
3270
3271 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3272
3273 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3274
3275 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3276 if (err >= 0)
3277 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3278 MGMT_STATUS_INVALID_PARAMS);
3279
3280 goto failed;
3281 }
3282
3283 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3284 if (!cmd) {
3285 err = -ENOMEM;
3286 goto failed;
3287 }
3288
3289 cmd->cmd_complete = addr_cmd_complete;
3290
3291 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3292 reply.pin_len = cp->pin_len;
3293 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3294
3295 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3296 if (err < 0)
3297 mgmt_pending_remove(cmd);
3298
3299 failed:
3300 hci_dev_unlock(hdev);
3301 return err;
3302 }
3303
3304 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3305 u16 len)
3306 {
3307 struct mgmt_cp_set_io_capability *cp = data;
3308
3309 bt_dev_dbg(hdev, "sock %p", sk);
3310
3311 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3312 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3313 MGMT_STATUS_INVALID_PARAMS);
3314
3315 hci_dev_lock(hdev);
3316
3317 hdev->io_capability = cp->io_capability;
3318
3319 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3320
3321 hci_dev_unlock(hdev);
3322
3323 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3324 NULL, 0);
3325 }
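/* Editorial note: the accepted IO capability values follow the SMP
 * definitions, 0x00 DisplayOnly, 0x01 DisplayYesNo, 0x02 KeyboardOnly,
 * 0x03 NoInputNoOutput and 0x04 KeyboardDisplay
 * (SMP_IO_KEYBOARD_DISPLAY); anything larger is rejected above.
 */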
3326
3327 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3328 {
3329 struct hci_dev *hdev = conn->hdev;
3330 struct mgmt_pending_cmd *cmd;
3331
3332 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3333 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3334 continue;
3335
3336 if (cmd->user_data != conn)
3337 continue;
3338
3339 return cmd;
3340 }
3341
3342 return NULL;
3343 }
3344
3345 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3346 {
3347 struct mgmt_rp_pair_device rp;
3348 struct hci_conn *conn = cmd->user_data;
3349 int err;
3350
3351 bacpy(&rp.addr.bdaddr, &conn->dst);
3352 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3353
3354 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3355 status, &rp, sizeof(rp));
3356
3357 /* So we don't get further callbacks for this connection */
3358 conn->connect_cfm_cb = NULL;
3359 conn->security_cfm_cb = NULL;
3360 conn->disconn_cfm_cb = NULL;
3361
3362 hci_conn_drop(conn);
3363
3364 /* The device is paired so there is no need to remove
3365 * its connection parameters anymore.
3366 */
3367 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3368
3369 hci_conn_put(conn);
3370
3371 return err;
3372 }
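/* Editorial note: the hci_conn_put() above balances the hci_conn_get()
 * taken in pair_device() when the pending command was created, so the
 * pairing command holds exactly one reference to the connection for
 * its lifetime.
 */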
3373
3374 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3375 {
3376 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3377 struct mgmt_pending_cmd *cmd;
3378
3379 cmd = find_pairing(conn);
3380 if (cmd) {
3381 cmd->cmd_complete(cmd, status);
3382 mgmt_pending_remove(cmd);
3383 }
3384 }
3385
3386 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3387 {
3388 struct mgmt_pending_cmd *cmd;
3389
3390 BT_DBG("status %u", status);
3391
3392 cmd = find_pairing(conn);
3393 if (!cmd) {
3394 BT_DBG("Unable to find a pending command");
3395 return;
3396 }
3397
3398 cmd->cmd_complete(cmd, mgmt_status(status));
3399 mgmt_pending_remove(cmd);
3400 }
3401
3402 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3403 {
3404 struct mgmt_pending_cmd *cmd;
3405
3406 BT_DBG("status %u", status);
3407
3408 if (!status)
3409 return;
3410
3411 cmd = find_pairing(conn);
3412 if (!cmd) {
3413 BT_DBG("Unable to find a pending command");
3414 return;
3415 }
3416
3417 cmd->cmd_complete(cmd, mgmt_status(status));
3418 mgmt_pending_remove(cmd);
3419 }
3420
3421 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3422 u16 len)
3423 {
3424 struct mgmt_cp_pair_device *cp = data;
3425 struct mgmt_rp_pair_device rp;
3426 struct mgmt_pending_cmd *cmd;
3427 u8 sec_level, auth_type;
3428 struct hci_conn *conn;
3429 int err;
3430
3431 bt_dev_dbg(hdev, "sock %p", sk);
3432
3433 memset(&rp, 0, sizeof(rp));
3434 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3435 rp.addr.type = cp->addr.type;
3436
3437 if (!bdaddr_type_is_valid(cp->addr.type))
3438 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3439 MGMT_STATUS_INVALID_PARAMS,
3440 &rp, sizeof(rp));
3441
3442 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3443 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3444 MGMT_STATUS_INVALID_PARAMS,
3445 &rp, sizeof(rp));
3446
3447 hci_dev_lock(hdev);
3448
3449 if (!hdev_is_powered(hdev)) {
3450 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3451 MGMT_STATUS_NOT_POWERED, &rp,
3452 sizeof(rp));
3453 goto unlock;
3454 }
3455
3456 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3457 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3458 MGMT_STATUS_ALREADY_PAIRED, &rp,
3459 sizeof(rp));
3460 goto unlock;
3461 }
3462
3463 sec_level = BT_SECURITY_MEDIUM;
3464 auth_type = HCI_AT_DEDICATED_BONDING;
3465
3466 if (cp->addr.type == BDADDR_BREDR) {
3467 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3468 auth_type, CONN_REASON_PAIR_DEVICE,
3469 HCI_ACL_CONN_TIMEOUT);
3470 } else {
3471 u8 addr_type = le_addr_type(cp->addr.type);
3472 struct hci_conn_params *p;
3473
3474 /* When pairing a new device, it is expected that the device
3475 * will be remembered for future connections. Adding the connection
3476 * parameter information ahead of time allows tracking
3477 * of the peripheral preferred values and will speed up any
3478 * further connection establishment.
3479 *
3480 * If connection parameters already exist, then they
3481 * will be kept and this function does nothing.
3482 */
3483 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3484 if (!p) {
3485 err = -EIO;
3486 goto unlock;
3487 }
3488
3489 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3490 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3491
3492 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3493 sec_level, HCI_LE_CONN_TIMEOUT,
3494 CONN_REASON_PAIR_DEVICE);
3495 }
3496
3497 if (IS_ERR(conn)) {
3498 int status;
3499
3500 if (PTR_ERR(conn) == -EBUSY)
3501 status = MGMT_STATUS_BUSY;
3502 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3503 status = MGMT_STATUS_NOT_SUPPORTED;
3504 else if (PTR_ERR(conn) == -ECONNREFUSED)
3505 status = MGMT_STATUS_REJECTED;
3506 else
3507 status = MGMT_STATUS_CONNECT_FAILED;
3508
3509 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3510 status, &rp, sizeof(rp));
3511 goto unlock;
3512 }
3513
3514 if (conn->connect_cfm_cb) {
3515 hci_conn_drop(conn);
3516 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3517 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3518 goto unlock;
3519 }
3520
3521 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3522 if (!cmd) {
3523 err = -ENOMEM;
3524 hci_conn_drop(conn);
3525 goto unlock;
3526 }
3527
3528 cmd->cmd_complete = pairing_complete;
3529
3530 /* For LE, just connecting isn't a proof that the pairing finished */
3531 if (cp->addr.type == BDADDR_BREDR) {
3532 conn->connect_cfm_cb = pairing_complete_cb;
3533 conn->security_cfm_cb = pairing_complete_cb;
3534 conn->disconn_cfm_cb = pairing_complete_cb;
3535 } else {
3536 conn->connect_cfm_cb = le_pairing_complete_cb;
3537 conn->security_cfm_cb = le_pairing_complete_cb;
3538 conn->disconn_cfm_cb = le_pairing_complete_cb;
3539 }
3540
3541 conn->io_capability = cp->io_cap;
3542 cmd->user_data = hci_conn_get(conn);
3543
3544 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3545 hci_conn_security(conn, sec_level, auth_type, true)) {
3546 cmd->cmd_complete(cmd, 0);
3547 mgmt_pending_remove(cmd);
3548 }
3549
3550 err = 0;
3551
3552 unlock:
3553 hci_dev_unlock(hdev);
3554 return err;
3555 }
3556
3557 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3558 u16 len)
3559 {
3560 struct mgmt_addr_info *addr = data;
3561 struct mgmt_pending_cmd *cmd;
3562 struct hci_conn *conn;
3563 int err;
3564
3565 bt_dev_dbg(hdev, "sock %p", sk);
3566
3567 hci_dev_lock(hdev);
3568
3569 if (!hdev_is_powered(hdev)) {
3570 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3571 MGMT_STATUS_NOT_POWERED);
3572 goto unlock;
3573 }
3574
3575 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3576 if (!cmd) {
3577 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3578 MGMT_STATUS_INVALID_PARAMS);
3579 goto unlock;
3580 }
3581
3582 conn = cmd->user_data;
3583
3584 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3585 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3586 MGMT_STATUS_INVALID_PARAMS);
3587 goto unlock;
3588 }
3589
3590 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3591 mgmt_pending_remove(cmd);
3592
3593 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3594 addr, sizeof(*addr));
3595
3596 /* Since the user doesn't want to proceed with the connection, abort any
3597 * ongoing pairing and then terminate the link if it was created
3598 * because of the pair device action.
3599 */
3600 if (addr->type == BDADDR_BREDR)
3601 hci_remove_link_key(hdev, &addr->bdaddr);
3602 else
3603 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3604 le_addr_type(addr->type));
3605
3606 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3607 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3608
3609 unlock:
3610 hci_dev_unlock(hdev);
3611 return err;
3612 }
3613
3614 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3615 struct mgmt_addr_info *addr, u16 mgmt_op,
3616 u16 hci_op, __le32 passkey)
3617 {
3618 struct mgmt_pending_cmd *cmd;
3619 struct hci_conn *conn;
3620 int err;
3621
3622 hci_dev_lock(hdev);
3623
3624 if (!hdev_is_powered(hdev)) {
3625 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3626 MGMT_STATUS_NOT_POWERED, addr,
3627 sizeof(*addr));
3628 goto done;
3629 }
3630
3631 if (addr->type == BDADDR_BREDR)
3632 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3633 else
3634 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3635 le_addr_type(addr->type));
3636
3637 if (!conn) {
3638 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3639 MGMT_STATUS_NOT_CONNECTED, addr,
3640 sizeof(*addr));
3641 goto done;
3642 }
3643
3644 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3645 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3646 if (!err)
3647 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3648 MGMT_STATUS_SUCCESS, addr,
3649 sizeof(*addr));
3650 else
3651 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3652 MGMT_STATUS_FAILED, addr,
3653 sizeof(*addr));
3654
3655 goto done;
3656 }
3657
3658 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3659 if (!cmd) {
3660 err = -ENOMEM;
3661 goto done;
3662 }
3663
3664 cmd->cmd_complete = addr_cmd_complete;
3665
3666 /* Continue with pairing via HCI */
3667 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3668 struct hci_cp_user_passkey_reply cp;
3669
3670 bacpy(&cp.bdaddr, &addr->bdaddr);
3671 cp.passkey = passkey;
3672 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3673 } else
3674 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3675 &addr->bdaddr);
3676
3677 if (err < 0)
3678 mgmt_pending_remove(cmd);
3679
3680 done:
3681 hci_dev_unlock(hdev);
3682 return err;
3683 }
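/* Editorial note: the handlers below are thin wrappers that feed the
 * matching MGMT/HCI opcode pair (plus, for passkey replies, the
 * passkey itself) into user_pairing_resp() above.
 */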
3684
3685 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3686 void *data, u16 len)
3687 {
3688 struct mgmt_cp_pin_code_neg_reply *cp = data;
3689
3690 bt_dev_dbg(hdev, "sock %p", sk);
3691
3692 return user_pairing_resp(sk, hdev, &cp->addr,
3693 MGMT_OP_PIN_CODE_NEG_REPLY,
3694 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3695 }
3696
3697 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3698 u16 len)
3699 {
3700 struct mgmt_cp_user_confirm_reply *cp = data;
3701
3702 bt_dev_dbg(hdev, "sock %p", sk);
3703
3704 if (len != sizeof(*cp))
3705 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3706 MGMT_STATUS_INVALID_PARAMS);
3707
3708 return user_pairing_resp(sk, hdev, &cp->addr,
3709 MGMT_OP_USER_CONFIRM_REPLY,
3710 HCI_OP_USER_CONFIRM_REPLY, 0);
3711 }
3712
3713 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3714 void *data, u16 len)
3715 {
3716 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3717
3718 bt_dev_dbg(hdev, "sock %p", sk);
3719
3720 return user_pairing_resp(sk, hdev, &cp->addr,
3721 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3722 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3723 }
3724
3725 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3726 u16 len)
3727 {
3728 struct mgmt_cp_user_passkey_reply *cp = data;
3729
3730 bt_dev_dbg(hdev, "sock %p", sk);
3731
3732 return user_pairing_resp(sk, hdev, &cp->addr,
3733 MGMT_OP_USER_PASSKEY_REPLY,
3734 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3735 }
3736
3737 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3738 void *data, u16 len)
3739 {
3740 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3741
3742 bt_dev_dbg(hdev, "sock %p", sk);
3743
3744 return user_pairing_resp(sk, hdev, &cp->addr,
3745 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3746 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3747 }
3748
3749 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3750 {
3751 struct adv_info *adv_instance;
3752
3753 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3754 if (!adv_instance)
3755 return 0;
3756
3757 /* stop if current instance doesn't need to be changed */
3758 if (!(adv_instance->flags & flags))
3759 return 0;
3760
3761 cancel_adv_timeout(hdev);
3762
3763 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3764 if (!adv_instance)
3765 return 0;
3766
3767 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3768
3769 return 0;
3770 }
3771
3772 static int name_changed_sync(struct hci_dev *hdev, void *data)
3773 {
3774 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3775 }
3776
3777 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3778 {
3779 struct mgmt_pending_cmd *cmd = data;
3780 struct mgmt_cp_set_local_name *cp = cmd->param;
3781 u8 status = mgmt_status(err);
3782
3783 bt_dev_dbg(hdev, "err %d", err);
3784
3785 if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3786 return;
3787
3788 if (status) {
3789 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3790 status);
3791 } else {
3792 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3793 cp, sizeof(*cp));
3794
3795 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3796 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3797 }
3798
3799 mgmt_pending_remove(cmd);
3800 }
3801
3802 static int set_name_sync(struct hci_dev *hdev, void *data)
3803 {
3804 if (lmp_bredr_capable(hdev)) {
3805 hci_update_name_sync(hdev);
3806 hci_update_eir_sync(hdev);
3807 }
3808
3809 /* The name is stored in the scan response data, so there is no
3810 * need to update the advertising data here.
3811 */
3812 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3813 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3814
3815 return 0;
3816 }
3817
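/* Set Local Name: if nothing changed, reply immediately; if the
 * controller is powered off, only update the stored names and emit
 * Local Name Changed; otherwise queue set_name_sync() to push the new
 * name into the EIR and scan response data.
 */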
3818 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3819 u16 len)
3820 {
3821 struct mgmt_cp_set_local_name *cp = data;
3822 struct mgmt_pending_cmd *cmd;
3823 int err;
3824
3825 bt_dev_dbg(hdev, "sock %p", sk);
3826
3827 hci_dev_lock(hdev);
3828
3829 /* If the old values are the same as the new ones, just return a
3830 * direct command complete event.
3831 */
3832 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3833 !memcmp(hdev->short_name, cp->short_name,
3834 sizeof(hdev->short_name))) {
3835 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3836 data, len);
3837 goto failed;
3838 }
3839
3840 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3841
3842 if (!hdev_is_powered(hdev)) {
3843 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3844
3845 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3846 data, len);
3847 if (err < 0)
3848 goto failed;
3849
3850 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3851 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3852 ext_info_changed(hdev, sk);
3853
3854 goto failed;
3855 }
3856
3857 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3858 if (!cmd)
3859 err = -ENOMEM;
3860 else
3861 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3862 set_name_complete);
3863
3864 if (err < 0) {
3865 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3866 MGMT_STATUS_FAILED);
3867
3868 if (cmd)
3869 mgmt_pending_remove(cmd);
3870
3871 goto failed;
3872 }
3873
3874 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3875
3876 failed:
3877 hci_dev_unlock(hdev);
3878 return err;
3879 }
3880
3881 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3882 {
3883 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3884 }
3885
3886 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3887 u16 len)
3888 {
3889 struct mgmt_cp_set_appearance *cp = data;
3890 u16 appearance;
3891 int err;
3892
3893 bt_dev_dbg(hdev, "sock %p", sk);
3894
3895 if (!lmp_le_capable(hdev))
3896 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3897 MGMT_STATUS_NOT_SUPPORTED);
3898
3899 appearance = le16_to_cpu(cp->appearance);
3900
3901 hci_dev_lock(hdev);
3902
3903 if (hdev->appearance != appearance) {
3904 hdev->appearance = appearance;
3905
3906 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3907 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3908 NULL);
3909
3910 ext_info_changed(hdev, sk);
3911 }
3912
3913 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3914 0);
3915
3916 hci_dev_unlock(hdev);
3917
3918 return err;
3919 }
3920
3921 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3922 void *data, u16 len)
3923 {
3924 struct mgmt_rp_get_phy_configuration rp;
3925
3926 bt_dev_dbg(hdev, "sock %p", sk);
3927
3928 hci_dev_lock(hdev);
3929
3930 memset(&rp, 0, sizeof(rp));
3931
3932 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3933 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3934 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3935
3936 hci_dev_unlock(hdev);
3937
3938 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3939 &rp, sizeof(rp));
3940 }
3941
3942 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3943 {
3944 struct mgmt_ev_phy_configuration_changed ev;
3945
3946 memset(&ev, 0, sizeof(ev));
3947
3948 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3949
3950 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3951 sizeof(ev), skip);
3952 }
3953
3954 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3955 {
3956 struct mgmt_pending_cmd *cmd = data;
3957 struct sk_buff *skb = cmd->skb;
3958 u8 status = mgmt_status(err);
3959
3960 if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
3961 return;
3962
3963 if (!status) {
3964 if (!skb)
3965 status = MGMT_STATUS_FAILED;
3966 else if (IS_ERR(skb))
3967 status = mgmt_status(PTR_ERR(skb));
3968 else
3969 status = mgmt_status(skb->data[0]);
3970 }
3971
3972 bt_dev_dbg(hdev, "status %d", status);
3973
3974 if (status) {
3975 mgmt_cmd_status(cmd->sk, hdev->id,
3976 MGMT_OP_SET_PHY_CONFIGURATION, status);
3977 } else {
3978 mgmt_cmd_complete(cmd->sk, hdev->id,
3979 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3980 NULL, 0);
3981
3982 mgmt_phy_configuration_changed(hdev, cmd->sk);
3983 }
3984
3985 if (skb && !IS_ERR(skb))
3986 kfree_skb(skb);
3987
3988 mgmt_pending_remove(cmd);
3989 }
3990
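/* Translate the mgmt PHY selection into an HCI LE Set Default PHY
 * command. Per the HCI specification, all_phys bit 0 means the host
 * has no TX PHY preference and bit 1 means no RX PHY preference, so
 * the respective bit is set whenever the corresponding mgmt mask is
 * empty.
 */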
3991 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3992 {
3993 struct mgmt_pending_cmd *cmd = data;
3994 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3995 struct hci_cp_le_set_default_phy cp_phy;
3996 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3997
3998 memset(&cp_phy, 0, sizeof(cp_phy));
3999
4000 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4001 cp_phy.all_phys |= 0x01;
4002
4003 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4004 cp_phy.all_phys |= 0x02;
4005
4006 if (selected_phys & MGMT_PHY_LE_1M_TX)
4007 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4008
4009 if (selected_phys & MGMT_PHY_LE_2M_TX)
4010 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4011
4012 if (selected_phys & MGMT_PHY_LE_CODED_TX)
4013 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4014
4015 if (selected_phys & MGMT_PHY_LE_1M_RX)
4016 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4017
4018 if (selected_phys & MGMT_PHY_LE_2M_RX)
4019 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4020
4021 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4022 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4023
4024 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4025 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4026
4027 return 0;
4028 }
4029
4030 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4031 void *data, u16 len)
4032 {
4033 struct mgmt_cp_set_phy_configuration *cp = data;
4034 struct mgmt_pending_cmd *cmd;
4035 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4036 u16 pkt_type = (HCI_DH1 | HCI_DM1);
4037 bool changed = false;
4038 int err;
4039
4040 bt_dev_dbg(hdev, "sock %p", sk);
4041
4042 configurable_phys = get_configurable_phys(hdev);
4043 supported_phys = get_supported_phys(hdev);
4044 selected_phys = __le32_to_cpu(cp->selected_phys);
4045
4046 if (selected_phys & ~supported_phys)
4047 return mgmt_cmd_status(sk, hdev->id,
4048 MGMT_OP_SET_PHY_CONFIGURATION,
4049 MGMT_STATUS_INVALID_PARAMS);
4050
4051 unconfigure_phys = supported_phys & ~configurable_phys;
4052
4053 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4054 return mgmt_cmd_status(sk, hdev->id,
4055 MGMT_OP_SET_PHY_CONFIGURATION,
4056 MGMT_STATUS_INVALID_PARAMS);
4057
4058 if (selected_phys == get_selected_phys(hdev))
4059 return mgmt_cmd_complete(sk, hdev->id,
4060 MGMT_OP_SET_PHY_CONFIGURATION,
4061 0, NULL, 0);
4062
4063 hci_dev_lock(hdev);
4064
4065 if (!hdev_is_powered(hdev)) {
4066 err = mgmt_cmd_status(sk, hdev->id,
4067 MGMT_OP_SET_PHY_CONFIGURATION,
4068 MGMT_STATUS_REJECTED);
4069 goto unlock;
4070 }
4071
4072 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4073 err = mgmt_cmd_status(sk, hdev->id,
4074 MGMT_OP_SET_PHY_CONFIGURATION,
4075 MGMT_STATUS_BUSY);
4076 goto unlock;
4077 }
4078
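/* The BR/EDR PHY selection is expressed through the ACL packet type
 * bitmask. Note that the EDR bits (HCI_2DHx/HCI_3DHx) carry "shall
 * not be used" semantics, which is why their handling is inverted
 * relative to the basic rate bits.
 */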
4079 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4080 pkt_type |= (HCI_DH3 | HCI_DM3);
4081 else
4082 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4083
4084 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4085 pkt_type |= (HCI_DH5 | HCI_DM5);
4086 else
4087 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4088
4089 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4090 pkt_type &= ~HCI_2DH1;
4091 else
4092 pkt_type |= HCI_2DH1;
4093
4094 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4095 pkt_type &= ~HCI_2DH3;
4096 else
4097 pkt_type |= HCI_2DH3;
4098
4099 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4100 pkt_type &= ~HCI_2DH5;
4101 else
4102 pkt_type |= HCI_2DH5;
4103
4104 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4105 pkt_type &= ~HCI_3DH1;
4106 else
4107 pkt_type |= HCI_3DH1;
4108
4109 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4110 pkt_type &= ~HCI_3DH3;
4111 else
4112 pkt_type |= HCI_3DH3;
4113
4114 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4115 pkt_type &= ~HCI_3DH5;
4116 else
4117 pkt_type |= HCI_3DH5;
4118
4119 if (pkt_type != hdev->pkt_type) {
4120 hdev->pkt_type = pkt_type;
4121 changed = true;
4122 }
4123
4124 if ((selected_phys & MGMT_PHY_LE_MASK) ==
4125 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4126 if (changed)
4127 mgmt_phy_configuration_changed(hdev, sk);
4128
4129 err = mgmt_cmd_complete(sk, hdev->id,
4130 MGMT_OP_SET_PHY_CONFIGURATION,
4131 0, NULL, 0);
4132
4133 goto unlock;
4134 }
4135
4136 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4137 len);
4138 if (!cmd)
4139 err = -ENOMEM;
4140 else
4141 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4142 set_default_phy_complete);
4143
4144 if (err < 0) {
4145 err = mgmt_cmd_status(sk, hdev->id,
4146 MGMT_OP_SET_PHY_CONFIGURATION,
4147 MGMT_STATUS_FAILED);
4148
4149 if (cmd)
4150 mgmt_pending_remove(cmd);
4151 }
4152
4153 unlock:
4154 hci_dev_unlock(hdev);
4155
4156 return err;
4157 }
4158
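/* Set Blocked Keys replaces the entire blocked-key list: existing
 * entries are cleared before the new ones are inserted, so an empty
 * list unblocks everything.
 */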
4159 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4160 u16 len)
4161 {
4162 int err = MGMT_STATUS_SUCCESS;
4163 struct mgmt_cp_set_blocked_keys *keys = data;
4164 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4165 sizeof(struct mgmt_blocked_key_info));
4166 u16 key_count, expected_len;
4167 int i;
4168
4169 bt_dev_dbg(hdev, "sock %p", sk);
4170
4171 key_count = __le16_to_cpu(keys->key_count);
4172 if (key_count > max_key_count) {
4173 bt_dev_err(hdev, "too big key_count value %u", key_count);
4174 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4175 MGMT_STATUS_INVALID_PARAMS);
4176 }
4177
4178 expected_len = struct_size(keys, keys, key_count);
4179 if (expected_len != len) {
4180 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4181 expected_len, len);
4182 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4183 MGMT_STATUS_INVALID_PARAMS);
4184 }
4185
4186 hci_dev_lock(hdev);
4187
4188 hci_blocked_keys_clear(hdev);
4189
4190 for (i = 0; i < key_count; ++i) {
4191 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4192
4193 if (!b) {
4194 err = MGMT_STATUS_NO_RESOURCES;
4195 break;
4196 }
4197
4198 b->type = keys->keys[i].type;
4199 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4200 list_add_rcu(&b->list, &hdev->blocked_keys);
4201 }
4202 hci_dev_unlock(hdev);
4203
4204 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4205 err, NULL, 0);
4206 }
4207
4208 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4209 void *data, u16 len)
4210 {
4211 struct mgmt_mode *cp = data;
4212 int err;
4213 bool changed = false;
4214
4215 bt_dev_dbg(hdev, "sock %p", sk);
4216
4217 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4218 return mgmt_cmd_status(sk, hdev->id,
4219 MGMT_OP_SET_WIDEBAND_SPEECH,
4220 MGMT_STATUS_NOT_SUPPORTED);
4221
4222 if (cp->val != 0x00 && cp->val != 0x01)
4223 return mgmt_cmd_status(sk, hdev->id,
4224 MGMT_OP_SET_WIDEBAND_SPEECH,
4225 MGMT_STATUS_INVALID_PARAMS);
4226
4227 hci_dev_lock(hdev);
4228
4229 if (hdev_is_powered(hdev) &&
4230 !!cp->val != hci_dev_test_flag(hdev,
4231 HCI_WIDEBAND_SPEECH_ENABLED)) {
4232 err = mgmt_cmd_status(sk, hdev->id,
4233 MGMT_OP_SET_WIDEBAND_SPEECH,
4234 MGMT_STATUS_REJECTED);
4235 goto unlock;
4236 }
4237
4238 if (cp->val)
4239 changed = !hci_dev_test_and_set_flag(hdev,
4240 HCI_WIDEBAND_SPEECH_ENABLED);
4241 else
4242 changed = hci_dev_test_and_clear_flag(hdev,
4243 HCI_WIDEBAND_SPEECH_ENABLED);
4244
4245 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4246 if (err < 0)
4247 goto unlock;
4248
4249 if (changed)
4250 err = new_settings(hdev, sk);
4251
4252 unlock:
4253 hci_dev_unlock(hdev);
4254 return err;
4255 }
4256
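/* Build the Read Controller Capabilities response as a sequence of
 * EIR-style (length, type, value) entries: security flags, maximum
 * encryption key sizes and, when available, the LE TX power range.
 */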
4257 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4258 void *data, u16 data_len)
4259 {
4260 char buf[20];
4261 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4262 u16 cap_len = 0;
4263 u8 flags = 0;
4264 u8 tx_power_range[2];
4265
4266 bt_dev_dbg(hdev, "sock %p", sk);
4267
4268 memset(&buf, 0, sizeof(buf));
4269
4270 hci_dev_lock(hdev);
4271
4272 /* When the Read Simple Pairing Options command is supported, remote
4273 * public key validation is supported.
4274 *
4275 * Alternatively, when Microsoft extensions are available, they can
4276 * indicate support for public key validation as well.
4277 */
4278 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4279 flags |= 0x01; /* Remote public key validation (BR/EDR) */
4280
4281 flags |= 0x02; /* Remote public key validation (LE) */
4282
4283 /* When the Read Encryption Key Size command is supported, then the
4284 * encryption key size is enforced.
4285 */
4286 if (hdev->commands[20] & 0x10)
4287 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
4288
4289 flags |= 0x08; /* Encryption key size enforcement (LE) */
4290
4291 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4292 &flags, 1);
4293
4294 /* When the Read Simple Pairing Options command is supported, the
4295 * maximum encryption key size information is provided as well.
4296 */
4297 if (hdev->commands[41] & 0x08)
4298 cap_len = eir_append_le16(rp->cap, cap_len,
4299 MGMT_CAP_MAX_ENC_KEY_SIZE,
4300 hdev->max_enc_key_size);
4301
4302 cap_len = eir_append_le16(rp->cap, cap_len,
4303 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4304 SMP_MAX_ENC_KEY_SIZE);
4305
4306 /* Append the min/max LE tx power parameters if we were able to fetch
4307 * them from the controller.
4308 */
4309 if (hdev->commands[38] & 0x80) {
4310 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4311 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4312 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4313 tx_power_range, 2);
4314 }
4315
4316 rp->cap_len = cpu_to_le16(cap_len);
4317
4318 hci_dev_unlock(hdev);
4319
4320 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4321 rp, sizeof(*rp) + cap_len);
4322 }
4323
4324 #ifdef CONFIG_BT_FEATURE_DEBUG
4325 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4326 static const u8 debug_uuid[16] = {
4327 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4328 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4329 };
4330 #endif
4331
4332 /* 330859bc-7506-492d-9370-9a6f0614037f */
4333 static const u8 quality_report_uuid[16] = {
4334 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4335 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4336 };
4337
4338 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4339 static const u8 offload_codecs_uuid[16] = {
4340 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4341 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4342 };
4343
4344 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4345 static const u8 le_simultaneous_roles_uuid[16] = {
4346 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4347 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4348 };
4349
4350 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
4351 static const u8 rpa_resolution_uuid[16] = {
4352 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
4353 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
4354 };
4355
4356 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4357 static const u8 iso_socket_uuid[16] = {
4358 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4359 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4360 };
4361
4362 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4363 static const u8 mgmt_mesh_uuid[16] = {
4364 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4365 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4366 };
4367
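/* Note that the UUID byte arrays above are stored in little-endian
 * order, i.e. reversed with respect to their canonical string form.
 */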
4368 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4369 void *data, u16 data_len)
4370 {
4371 struct mgmt_rp_read_exp_features_info *rp;
4372 size_t len;
4373 u16 idx = 0;
4374 u32 flags;
4375 int status;
4376
4377 bt_dev_dbg(hdev, "sock %p", sk);
4378
4379 /* Enough space for 7 features */
4380 len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4381 rp = kzalloc(len, GFP_KERNEL);
4382 if (!rp)
4383 return -ENOMEM;
4384
4385 #ifdef CONFIG_BT_FEATURE_DEBUG
4386 if (!hdev) {
4387 flags = bt_dbg_get() ? BIT(0) : 0;
4388
4389 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4390 rp->features[idx].flags = cpu_to_le32(flags);
4391 idx++;
4392 }
4393 #endif
4394
4395 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4396 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4397 flags = BIT(0);
4398 else
4399 flags = 0;
4400
4401 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4402 rp->features[idx].flags = cpu_to_le32(flags);
4403 idx++;
4404 }
4405
4406 if (hdev && ll_privacy_capable(hdev)) {
4407 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4408 flags = BIT(0) | BIT(1);
4409 else
4410 flags = BIT(1);
4411
4412 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4413 rp->features[idx].flags = cpu_to_le32(flags);
4414 idx++;
4415 }
4416
4417 if (hdev && (aosp_has_quality_report(hdev) ||
4418 hdev->set_quality_report)) {
4419 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4420 flags = BIT(0);
4421 else
4422 flags = 0;
4423
4424 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4425 rp->features[idx].flags = cpu_to_le32(flags);
4426 idx++;
4427 }
4428
4429 if (hdev && hdev->get_data_path_id) {
4430 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4431 flags = BIT(0);
4432 else
4433 flags = 0;
4434
4435 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4436 rp->features[idx].flags = cpu_to_le32(flags);
4437 idx++;
4438 }
4439
4440 if (IS_ENABLED(CONFIG_BT_LE)) {
4441 flags = iso_enabled() ? BIT(0) : 0;
4442 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4443 rp->features[idx].flags = cpu_to_le32(flags);
4444 idx++;
4445 }
4446
4447 if (hdev && lmp_le_capable(hdev)) {
4448 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4449 flags = BIT(0);
4450 else
4451 flags = 0;
4452
4453 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4454 rp->features[idx].flags = cpu_to_le32(flags);
4455 idx++;
4456 }
4457
4458 rp->feature_count = cpu_to_le16(idx);
4459
4460 /* After reading the experimental features information, enable
4461 * the events to update the client on any future changes.
4462 */
4463 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4464
4465 status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4466 MGMT_OP_READ_EXP_FEATURES_INFO,
4467 0, rp, sizeof(*rp) + (20 * idx));
4468
4469 kfree(rp);
4470 return status;
4471 }
4472
4473 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4474 struct sock *skip)
4475 {
4476 struct mgmt_ev_exp_feature_changed ev;
4477
4478 memset(&ev, 0, sizeof(ev));
4479 memcpy(ev.uuid, rpa_resolution_uuid, 16);
4480 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4481
4482 // Do we need to be atomic with the conn_flags?
4483 if (enabled && privacy_mode_capable(hdev))
4484 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4485 else
4486 hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4487
4488 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4489 &ev, sizeof(ev),
4490 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4491
4492 }
4493
4494 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4495 bool enabled, struct sock *skip)
4496 {
4497 struct mgmt_ev_exp_feature_changed ev;
4498
4499 memset(&ev, 0, sizeof(ev));
4500 memcpy(ev.uuid, uuid, 16);
4501 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4502
4503 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4504 &ev, sizeof(ev),
4505 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4506 }
4507
4508 #define EXP_FEAT(_uuid, _set_func) \
4509 { \
4510 .uuid = _uuid, \
4511 .set_func = _set_func, \
4512 }
4513
4514 /* The zero key uuid is special. Multiple exp features are set through it. */
4515 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4516 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4517 {
4518 struct mgmt_rp_set_exp_feature rp;
4519
4520 memset(rp.uuid, 0, 16);
4521 rp.flags = cpu_to_le32(0);
4522
4523 #ifdef CONFIG_BT_FEATURE_DEBUG
4524 if (!hdev) {
4525 bool changed = bt_dbg_get();
4526
4527 bt_dbg_set(false);
4528
4529 if (changed)
4530 exp_feature_changed(NULL, ZERO_KEY, false, sk);
4531 }
4532 #endif
4533
4534 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4535 bool changed;
4536
4537 changed = hci_dev_test_and_clear_flag(hdev,
4538 HCI_ENABLE_LL_PRIVACY);
4539 if (changed)
4540 exp_feature_changed(hdev, rpa_resolution_uuid, false,
4541 sk);
4542 }
4543
4544 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4545
4546 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4547 MGMT_OP_SET_EXP_FEATURE, 0,
4548 &rp, sizeof(rp));
4549 }
4550
4551 #ifdef CONFIG_BT_FEATURE_DEBUG
4552 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4553 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4554 {
4555 struct mgmt_rp_set_exp_feature rp;
4556
4557 bool val, changed;
4558 int err;
4559
4560 /* This command requires the non-controller index */
4561 if (hdev)
4562 return mgmt_cmd_status(sk, hdev->id,
4563 MGMT_OP_SET_EXP_FEATURE,
4564 MGMT_STATUS_INVALID_INDEX);
4565
4566 /* Parameters are limited to a single octet */
4567 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4568 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4569 MGMT_OP_SET_EXP_FEATURE,
4570 MGMT_STATUS_INVALID_PARAMS);
4571
4572 /* Only boolean on/off is supported */
4573 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4574 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4575 MGMT_OP_SET_EXP_FEATURE,
4576 MGMT_STATUS_INVALID_PARAMS);
4577
4578 val = !!cp->param[0];
4579 changed = val ? !bt_dbg_get() : bt_dbg_get();
4580 bt_dbg_set(val);
4581
4582 memcpy(rp.uuid, debug_uuid, 16);
4583 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4584
4585 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4586
4587 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4588 MGMT_OP_SET_EXP_FEATURE, 0,
4589 &rp, sizeof(rp));
4590
4591 if (changed)
4592 exp_feature_changed(hdev, debug_uuid, val, sk);
4593
4594 return err;
4595 }
4596 #endif
4597
4598 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4599 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4600 {
4601 struct mgmt_rp_set_exp_feature rp;
4602 bool val, changed;
4603 int err;
4604
4605 /* This command requires the controller index */
4606 if (!hdev)
4607 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4608 MGMT_OP_SET_EXP_FEATURE,
4609 MGMT_STATUS_INVALID_INDEX);
4610
4611 /* Parameters are limited to a single octet */
4612 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4613 return mgmt_cmd_status(sk, hdev->id,
4614 MGMT_OP_SET_EXP_FEATURE,
4615 MGMT_STATUS_INVALID_PARAMS);
4616
4617 /* Only boolean on/off is supported */
4618 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4619 return mgmt_cmd_status(sk, hdev->id,
4620 MGMT_OP_SET_EXP_FEATURE,
4621 MGMT_STATUS_INVALID_PARAMS);
4622
4623 val = !!cp->param[0];
4624
4625 if (val) {
4626 changed = !hci_dev_test_and_set_flag(hdev,
4627 HCI_MESH_EXPERIMENTAL);
4628 } else {
4629 hci_dev_clear_flag(hdev, HCI_MESH);
4630 changed = hci_dev_test_and_clear_flag(hdev,
4631 HCI_MESH_EXPERIMENTAL);
4632 }
4633
4634 memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4635 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4636
4637 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4638
4639 err = mgmt_cmd_complete(sk, hdev->id,
4640 MGMT_OP_SET_EXP_FEATURE, 0,
4641 &rp, sizeof(rp));
4642
4643 if (changed)
4644 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4645
4646 return err;
4647 }
4648
4649 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4650 struct mgmt_cp_set_exp_feature *cp,
4651 u16 data_len)
4652 {
4653 struct mgmt_rp_set_exp_feature rp;
4654 bool val, changed;
4655 int err;
4656 u32 flags;
4657
4658 /* This command requires the controller index */
4659 if (!hdev)
4660 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4661 MGMT_OP_SET_EXP_FEATURE,
4662 MGMT_STATUS_INVALID_INDEX);
4663
4664 /* Changes can only be made when the controller is powered down */
4665 if (hdev_is_powered(hdev))
4666 return mgmt_cmd_status(sk, hdev->id,
4667 MGMT_OP_SET_EXP_FEATURE,
4668 MGMT_STATUS_REJECTED);
4669
4670 /* Parameters are limited to a single octet */
4671 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4672 return mgmt_cmd_status(sk, hdev->id,
4673 MGMT_OP_SET_EXP_FEATURE,
4674 MGMT_STATUS_INVALID_PARAMS);
4675
4676 /* Only boolean on/off is supported */
4677 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4678 return mgmt_cmd_status(sk, hdev->id,
4679 MGMT_OP_SET_EXP_FEATURE,
4680 MGMT_STATUS_INVALID_PARAMS);
4681
4682 val = !!cp->param[0];
4683
4684 if (val) {
4685 changed = !hci_dev_test_and_set_flag(hdev,
4686 HCI_ENABLE_LL_PRIVACY);
4687 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4688
4689 /* Enable LL privacy + supported settings changed */
4690 flags = BIT(0) | BIT(1);
4691 } else {
4692 changed = hci_dev_test_and_clear_flag(hdev,
4693 HCI_ENABLE_LL_PRIVACY);
4694
4695 /* Disable LL privacy + supported settings changed */
4696 flags = BIT(1);
4697 }
4698
4699 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4700 rp.flags = cpu_to_le32(flags);
4701
4702 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4703
4704 err = mgmt_cmd_complete(sk, hdev->id,
4705 MGMT_OP_SET_EXP_FEATURE, 0,
4706 &rp, sizeof(rp));
4707
4708 if (changed)
4709 exp_ll_privacy_feature_changed(val, hdev, sk);
4710
4711 return err;
4712 }
4713
4714 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4715 struct mgmt_cp_set_exp_feature *cp,
4716 u16 data_len)
4717 {
4718 struct mgmt_rp_set_exp_feature rp;
4719 bool val, changed;
4720 int err;
4721
4722 /* This command requires a valid controller index */
4723 if (!hdev)
4724 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4725 MGMT_OP_SET_EXP_FEATURE,
4726 MGMT_STATUS_INVALID_INDEX);
4727
4728 /* Parameters are limited to a single octet */
4729 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4730 return mgmt_cmd_status(sk, hdev->id,
4731 MGMT_OP_SET_EXP_FEATURE,
4732 MGMT_STATUS_INVALID_PARAMS);
4733
4734 /* Only boolean on/off is supported */
4735 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4736 return mgmt_cmd_status(sk, hdev->id,
4737 MGMT_OP_SET_EXP_FEATURE,
4738 MGMT_STATUS_INVALID_PARAMS);
4739
4740 hci_req_sync_lock(hdev);
4741
4742 val = !!cp->param[0];
4743 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4744
4745 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4746 err = mgmt_cmd_status(sk, hdev->id,
4747 MGMT_OP_SET_EXP_FEATURE,
4748 MGMT_STATUS_NOT_SUPPORTED);
4749 goto unlock_quality_report;
4750 }
4751
4752 if (changed) {
4753 if (hdev->set_quality_report)
4754 err = hdev->set_quality_report(hdev, val);
4755 else
4756 err = aosp_set_quality_report(hdev, val);
4757
4758 if (err) {
4759 err = mgmt_cmd_status(sk, hdev->id,
4760 MGMT_OP_SET_EXP_FEATURE,
4761 MGMT_STATUS_FAILED);
4762 goto unlock_quality_report;
4763 }
4764
4765 if (val)
4766 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4767 else
4768 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4769 }
4770
4771 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4772
4773 memcpy(rp.uuid, quality_report_uuid, 16);
4774 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4775 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4776
4777 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4778 &rp, sizeof(rp));
4779
4780 if (changed)
4781 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4782
4783 unlock_quality_report:
4784 hci_req_sync_unlock(hdev);
4785 return err;
4786 }
4787
4788 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4789 struct mgmt_cp_set_exp_feature *cp,
4790 u16 data_len)
4791 {
4792 bool val, changed;
4793 int err;
4794 struct mgmt_rp_set_exp_feature rp;
4795
4796 /* This command requires a valid controller index */
4797 if (!hdev)
4798 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4799 MGMT_OP_SET_EXP_FEATURE,
4800 MGMT_STATUS_INVALID_INDEX);
4801
4802 /* Parameters are limited to a single octet */
4803 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4804 return mgmt_cmd_status(sk, hdev->id,
4805 MGMT_OP_SET_EXP_FEATURE,
4806 MGMT_STATUS_INVALID_PARAMS);
4807
4808 /* Only boolean on/off is supported */
4809 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4810 return mgmt_cmd_status(sk, hdev->id,
4811 MGMT_OP_SET_EXP_FEATURE,
4812 MGMT_STATUS_INVALID_PARAMS);
4813
4814 val = !!cp->param[0];
4815 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4816
4817 if (!hdev->get_data_path_id) {
4818 return mgmt_cmd_status(sk, hdev->id,
4819 MGMT_OP_SET_EXP_FEATURE,
4820 MGMT_STATUS_NOT_SUPPORTED);
4821 }
4822
4823 if (changed) {
4824 if (val)
4825 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4826 else
4827 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4828 }
4829
4830 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4831 val, changed);
4832
4833 memcpy(rp.uuid, offload_codecs_uuid, 16);
4834 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4835 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4836 err = mgmt_cmd_complete(sk, hdev->id,
4837 MGMT_OP_SET_EXP_FEATURE, 0,
4838 &rp, sizeof(rp));
4839
4840 if (changed)
4841 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4842
4843 return err;
4844 }
4845
4846 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4847 struct mgmt_cp_set_exp_feature *cp,
4848 u16 data_len)
4849 {
4850 bool val, changed;
4851 int err;
4852 struct mgmt_rp_set_exp_feature rp;
4853
4854 /* This command requires a valid controller index */
4855 if (!hdev)
4856 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4857 MGMT_OP_SET_EXP_FEATURE,
4858 MGMT_STATUS_INVALID_INDEX);
4859
4860 /* Parameters are limited to a single octet */
4861 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4862 return mgmt_cmd_status(sk, hdev->id,
4863 MGMT_OP_SET_EXP_FEATURE,
4864 MGMT_STATUS_INVALID_PARAMS);
4865
4866 /* Only boolean on/off is supported */
4867 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4868 return mgmt_cmd_status(sk, hdev->id,
4869 MGMT_OP_SET_EXP_FEATURE,
4870 MGMT_STATUS_INVALID_PARAMS);
4871
4872 val = !!cp->param[0];
4873 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4874
4875 if (!hci_dev_le_state_simultaneous(hdev)) {
4876 return mgmt_cmd_status(sk, hdev->id,
4877 MGMT_OP_SET_EXP_FEATURE,
4878 MGMT_STATUS_NOT_SUPPORTED);
4879 }
4880
4881 if (changed) {
4882 if (val)
4883 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4884 else
4885 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4886 }
4887
4888 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4889 val, changed);
4890
4891 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4892 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4893 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4894 err = mgmt_cmd_complete(sk, hdev->id,
4895 MGMT_OP_SET_EXP_FEATURE, 0,
4896 &rp, sizeof(rp));
4897
4898 if (changed)
4899 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4900
4901 return err;
4902 }
4903
4904 #ifdef CONFIG_BT_LE
4905 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4906 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4907 {
4908 struct mgmt_rp_set_exp_feature rp;
4909 bool val, changed = false;
4910 int err;
4911
4912 /* This command requires the non-controller index */
4913 if (hdev)
4914 return mgmt_cmd_status(sk, hdev->id,
4915 MGMT_OP_SET_EXP_FEATURE,
4916 MGMT_STATUS_INVALID_INDEX);
4917
4918 /* Parameters are limited to a single octet */
4919 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4920 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4921 MGMT_OP_SET_EXP_FEATURE,
4922 MGMT_STATUS_INVALID_PARAMS);
4923
4924 /* Only boolean on/off is supported */
4925 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4926 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4927 MGMT_OP_SET_EXP_FEATURE,
4928 MGMT_STATUS_INVALID_PARAMS);
4929
4930 val = !!cp->param[0];
4931 if (val)
4932 err = iso_init();
4933 else
4934 err = iso_exit();
4935
4936 if (!err)
4937 changed = true;
4938
4939 memcpy(rp.uuid, iso_socket_uuid, 16);
4940 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4941
4942 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4943
4944 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4945 MGMT_OP_SET_EXP_FEATURE, 0,
4946 &rp, sizeof(rp));
4947
4948 if (changed)
4949 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4950
4951 return err;
4952 }
4953 #endif
4954
4955 static const struct mgmt_exp_feature {
4956 const u8 *uuid;
4957 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4958 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4959 } exp_features[] = {
4960 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4961 #ifdef CONFIG_BT_FEATURE_DEBUG
4962 EXP_FEAT(debug_uuid, set_debug_func),
4963 #endif
4964 EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
4965 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
4966 EXP_FEAT(quality_report_uuid, set_quality_report_func),
4967 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
4968 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
4969 #ifdef CONFIG_BT_LE
4970 EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
4971 #endif
4972
4973 /* end with a null feature */
4974 EXP_FEAT(NULL, NULL)
4975 };
4976
4977 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4978 void *data, u16 data_len)
4979 {
4980 struct mgmt_cp_set_exp_feature *cp = data;
4981 size_t i = 0;
4982
4983 bt_dev_dbg(hdev, "sock %p", sk);
4984
4985 for (i = 0; exp_features[i].uuid; i++) {
4986 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4987 return exp_features[i].set_func(sk, hdev, cp, data_len);
4988 }
4989
4990 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4991 MGMT_OP_SET_EXP_FEATURE,
4992 MGMT_STATUS_NOT_SUPPORTED);
4993 }
4994
4995 static u32 get_params_flags(struct hci_dev *hdev,
4996 struct hci_conn_params *params)
4997 {
4998 u32 flags = hdev->conn_flags;
4999
5000 /* Devices using RPAs can only be programmed in the acceptlist if
5001 * LL Privacy has been enabled; otherwise they cannot set
5002 * HCI_CONN_FLAG_REMOTE_WAKEUP.
5003 */
5004 if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5005 hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
5006 flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
5007
5008 return flags;
5009 }
5010
5011 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5012 u16 data_len)
5013 {
5014 struct mgmt_cp_get_device_flags *cp = data;
5015 struct mgmt_rp_get_device_flags rp;
5016 struct bdaddr_list_with_flags *br_params;
5017 struct hci_conn_params *params;
5018 u32 supported_flags;
5019 u32 current_flags = 0;
5020 u8 status = MGMT_STATUS_INVALID_PARAMS;
5021
5022 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
5023 &cp->addr.bdaddr, cp->addr.type);
5024
5025 hci_dev_lock(hdev);
5026
5027 supported_flags = hdev->conn_flags;
5028
5029 memset(&rp, 0, sizeof(rp));
5030
5031 if (cp->addr.type == BDADDR_BREDR) {
5032 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5033 &cp->addr.bdaddr,
5034 cp->addr.type);
5035 if (!br_params)
5036 goto done;
5037
5038 current_flags = br_params->flags;
5039 } else {
5040 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5041 le_addr_type(cp->addr.type));
5042 if (!params)
5043 goto done;
5044
5045 supported_flags = get_params_flags(hdev, params);
5046 current_flags = params->flags;
5047 }
5048
5049 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5050 rp.addr.type = cp->addr.type;
5051 rp.supported_flags = cpu_to_le32(supported_flags);
5052 rp.current_flags = cpu_to_le32(current_flags);
5053
5054 status = MGMT_STATUS_SUCCESS;
5055
5056 done:
5057 hci_dev_unlock(hdev);
5058
5059 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5060 &rp, sizeof(rp));
5061 }
5062
5063 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5064 bdaddr_t *bdaddr, u8 bdaddr_type,
5065 u32 supported_flags, u32 current_flags)
5066 {
5067 struct mgmt_ev_device_flags_changed ev;
5068
5069 bacpy(&ev.addr.bdaddr, bdaddr);
5070 ev.addr.type = bdaddr_type;
5071 ev.supported_flags = cpu_to_le32(supported_flags);
5072 ev.current_flags = cpu_to_le32(current_flags);
5073
5074 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5075 }
5076
5077 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5078 u16 len)
5079 {
5080 struct mgmt_cp_set_device_flags *cp = data;
5081 struct bdaddr_list_with_flags *br_params;
5082 struct hci_conn_params *params;
5083 u8 status = MGMT_STATUS_INVALID_PARAMS;
5084 u32 supported_flags;
5085 u32 current_flags = __le32_to_cpu(cp->current_flags);
5086
5087 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5088 &cp->addr.bdaddr, cp->addr.type, current_flags);
5089
5090 // We should take hci_dev_lock() earlier, since conn_flags can change
5091 supported_flags = hdev->conn_flags;
5092
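/* Reject any flag outside the supported set: the check below only
 * passes if current_flags adds no bits over supported_flags.
 */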
5093 if ((supported_flags | current_flags) != supported_flags) {
5094 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5095 current_flags, supported_flags);
5096 goto done;
5097 }
5098
5099 hci_dev_lock(hdev);
5100
5101 if (cp->addr.type == BDADDR_BREDR) {
5102 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5103 &cp->addr.bdaddr,
5104 cp->addr.type);
5105
5106 if (br_params) {
5107 br_params->flags = current_flags;
5108 status = MGMT_STATUS_SUCCESS;
5109 } else {
5110 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5111 &cp->addr.bdaddr, cp->addr.type);
5112 }
5113
5114 goto unlock;
5115 }
5116
5117 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5118 le_addr_type(cp->addr.type));
5119 if (!params) {
5120 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5121 &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5122 goto unlock;
5123 }
5124
5125 supported_flags = get_params_flags(hdev, params);
5126
5127 if ((supported_flags | current_flags) != supported_flags) {
5128 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5129 current_flags, supported_flags);
5130 goto unlock;
5131 }
5132
5133 WRITE_ONCE(params->flags, current_flags);
5134 status = MGMT_STATUS_SUCCESS;
5135
5136 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5137 * has been set.
5138 */
5139 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5140 hci_update_passive_scan(hdev);
5141
5142 unlock:
5143 hci_dev_unlock(hdev);
5144
5145 done:
5146 if (status == MGMT_STATUS_SUCCESS)
5147 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5148 supported_flags, current_flags);
5149
5150 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5151 &cp->addr, sizeof(cp->addr));
5152 }
5153
5154 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5155 u16 handle)
5156 {
5157 struct mgmt_ev_adv_monitor_added ev;
5158
5159 ev.monitor_handle = cpu_to_le16(handle);
5160
5161 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5162 }
5163
5164 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5165 {
5166 struct mgmt_ev_adv_monitor_removed ev;
5167 struct mgmt_pending_cmd *cmd;
5168 struct sock *sk_skip = NULL;
5169 struct mgmt_cp_remove_adv_monitor *cp;
5170
5171 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5172 if (cmd) {
5173 cp = cmd->param;
5174
5175 if (cp->monitor_handle)
5176 sk_skip = cmd->sk;
5177 }
5178
5179 ev.monitor_handle = cpu_to_le16(handle);
5180
5181 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
5182 }
5183
5184 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5185 void *data, u16 len)
5186 {
5187 struct adv_monitor *monitor = NULL;
5188 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5189 int handle, err;
5190 size_t rp_size = 0;
5191 __u32 supported = 0;
5192 __u32 enabled = 0;
5193 __u16 num_handles = 0;
5194 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5195
5196 BT_DBG("request for %s", hdev->name);
5197
5198 hci_dev_lock(hdev);
5199
5200 if (msft_monitor_supported(hdev))
5201 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5202
5203 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5204 handles[num_handles++] = monitor->handle;
5205
5206 hci_dev_unlock(hdev);
5207
5208 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5209 rp = kmalloc(rp_size, GFP_KERNEL);
5210 if (!rp)
5211 return -ENOMEM;
5212
5213 /* All supported features are currently enabled */
5214 enabled = supported;
5215
5216 rp->supported_features = cpu_to_le32(supported);
5217 rp->enabled_features = cpu_to_le32(enabled);
5218 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5219 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5220 rp->num_handles = cpu_to_le16(num_handles);
5221 if (num_handles)
5222 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5223
5224 err = mgmt_cmd_complete(sk, hdev->id,
5225 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5226 MGMT_STATUS_SUCCESS, rp, rp_size);
5227
5228 kfree(rp);
5229
5230 return err;
5231 }
5232
5233 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5234 void *data, int status)
5235 {
5236 struct mgmt_rp_add_adv_patterns_monitor rp;
5237 struct mgmt_pending_cmd *cmd = data;
5238 struct adv_monitor *monitor = cmd->user_data;
5239
5240 hci_dev_lock(hdev);
5241
5242 rp.monitor_handle = cpu_to_le16(monitor->handle);
5243
5244 if (!status) {
5245 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5246 hdev->adv_monitors_cnt++;
5247 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5248 monitor->state = ADV_MONITOR_STATE_REGISTERED;
5249 hci_update_passive_scan(hdev);
5250 }
5251
5252 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5253 mgmt_status(status), &rp, sizeof(rp));
5254 mgmt_pending_remove(cmd);
5255
5256 hci_dev_unlock(hdev);
5257 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5258 rp.monitor_handle, status);
5259 }
5260
5261 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5262 {
5263 struct mgmt_pending_cmd *cmd = data;
5264 struct adv_monitor *monitor = cmd->user_data;
5265
5266 return hci_add_adv_monitor(hdev, monitor);
5267 }
5268
5269 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5270 struct adv_monitor *m, u8 status,
5271 void *data, u16 len, u16 op)
5272 {
5273 struct mgmt_pending_cmd *cmd;
5274 int err;
5275
5276 hci_dev_lock(hdev);
5277
5278 if (status)
5279 goto unlock;
5280
5281 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5282 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5283 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5284 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5285 status = MGMT_STATUS_BUSY;
5286 goto unlock;
5287 }
5288
5289 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5290 if (!cmd) {
5291 status = MGMT_STATUS_NO_RESOURCES;
5292 goto unlock;
5293 }
5294
5295 cmd->user_data = m;
5296 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5297 mgmt_add_adv_patterns_monitor_complete);
5298 if (err) {
5299 if (err == -ENOMEM)
5300 status = MGMT_STATUS_NO_RESOURCES;
5301 else
5302 status = MGMT_STATUS_FAILED;
5303
5304 goto unlock;
5305 }
5306
5307 hci_dev_unlock(hdev);
5308
5309 return 0;
5310
5311 unlock:
5312 hci_free_adv_monitor(hdev, m);
5313 hci_dev_unlock(hdev);
5314 return mgmt_cmd_status(sk, hdev->id, op, status);
5315 }
5316
5317 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5318 struct mgmt_adv_rssi_thresholds *rssi)
5319 {
5320 if (rssi) {
5321 m->rssi.low_threshold = rssi->low_threshold;
5322 m->rssi.low_threshold_timeout =
5323 __le16_to_cpu(rssi->low_threshold_timeout);
5324 m->rssi.high_threshold = rssi->high_threshold;
5325 m->rssi.high_threshold_timeout =
5326 __le16_to_cpu(rssi->high_threshold_timeout);
5327 m->rssi.sampling_period = rssi->sampling_period;
5328 } else {
5329 /* Default values. These numbers are the least constricting
5330 * parameters for the MSFT API to work, so it behaves as if there
5331 * are no RSSI parameters to consider. May need to be changed
5332 * if other APIs are to be supported.
5333 */
5334 m->rssi.low_threshold = -127;
5335 m->rssi.low_threshold_timeout = 60;
5336 m->rssi.high_threshold = -127;
5337 m->rssi.high_threshold_timeout = 0;
5338 m->rssi.sampling_period = 0;
5339 }
5340 }
5341
5342 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5343 struct mgmt_adv_pattern *patterns)
5344 {
5345 u8 offset = 0, length = 0;
5346 struct adv_pattern *p = NULL;
5347 int i;
5348
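/* Each pattern must fit within a single extended advertising report,
 * so both the offset and offset + length are bounded by
 * HCI_MAX_EXT_AD_LENGTH.
 */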
5349 for (i = 0; i < pattern_count; i++) {
5350 offset = patterns[i].offset;
5351 length = patterns[i].length;
5352 if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5353 length > HCI_MAX_EXT_AD_LENGTH ||
5354 (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5355 return MGMT_STATUS_INVALID_PARAMS;
5356
5357 p = kmalloc(sizeof(*p), GFP_KERNEL);
5358 if (!p)
5359 return MGMT_STATUS_NO_RESOURCES;
5360
5361 p->ad_type = patterns[i].ad_type;
5362 p->offset = patterns[i].offset;
5363 p->length = patterns[i].length;
5364 memcpy(p->value, patterns[i].value, p->length);
5365
5366 INIT_LIST_HEAD(&p->list);
5367 list_add(&p->list, &m->patterns);
5368 }
5369
5370 return MGMT_STATUS_SUCCESS;
5371 }
5372
5373 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5374 void *data, u16 len)
5375 {
5376 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5377 struct adv_monitor *m = NULL;
5378 u8 status = MGMT_STATUS_SUCCESS;
5379 size_t expected_size = sizeof(*cp);
5380
5381 BT_DBG("request for %s", hdev->name);
5382
5383 if (len <= sizeof(*cp)) {
5384 status = MGMT_STATUS_INVALID_PARAMS;
5385 goto done;
5386 }
5387
5388 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5389 if (len != expected_size) {
5390 status = MGMT_STATUS_INVALID_PARAMS;
5391 goto done;
5392 }
5393
5394 m = kzalloc(sizeof(*m), GFP_KERNEL);
5395 if (!m) {
5396 status = MGMT_STATUS_NO_RESOURCES;
5397 goto done;
5398 }
5399
5400 INIT_LIST_HEAD(&m->patterns);
5401
5402 parse_adv_monitor_rssi(m, NULL);
5403 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5404
5405 done:
5406 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5407 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5408 }
5409
5410 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5411 void *data, u16 len)
5412 {
5413 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5414 struct adv_monitor *m = NULL;
5415 u8 status = MGMT_STATUS_SUCCESS;
5416 size_t expected_size = sizeof(*cp);
5417
5418 BT_DBG("request for %s", hdev->name);
5419
5420 if (len <= sizeof(*cp)) {
5421 status = MGMT_STATUS_INVALID_PARAMS;
5422 goto done;
5423 }
5424
5425 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5426 if (len != expected_size) {
5427 status = MGMT_STATUS_INVALID_PARAMS;
5428 goto done;
5429 }
5430
5431 m = kzalloc(sizeof(*m), GFP_KERNEL);
5432 if (!m) {
5433 status = MGMT_STATUS_NO_RESOURCES;
5434 goto done;
5435 }
5436
5437 INIT_LIST_HEAD(&m->patterns);
5438
5439 parse_adv_monitor_rssi(m, &cp->rssi);
5440 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5441
5442 done:
5443 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5444 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5445 }
5446
5447 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5448 void *data, int status)
5449 {
5450 struct mgmt_rp_remove_adv_monitor rp;
5451 struct mgmt_pending_cmd *cmd = data;
5452 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5453
5454 hci_dev_lock(hdev);
5455
5456 rp.monitor_handle = cp->monitor_handle;
5457
5458 if (!status)
5459 hci_update_passive_scan(hdev);
5460
5461 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5462 mgmt_status(status), &rp, sizeof(rp));
5463 mgmt_pending_remove(cmd);
5464
5465 hci_dev_unlock(hdev);
5466 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5467 rp.monitor_handle, status);
5468 }
5469
5470 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5471 {
5472 struct mgmt_pending_cmd *cmd = data;
5473 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5474 u16 handle = __le16_to_cpu(cp->monitor_handle);
5475
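/* Handle 0 is reserved to request removal of all monitors */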
5476 if (!handle)
5477 return hci_remove_all_adv_monitor(hdev);
5478
5479 return hci_remove_single_adv_monitor(hdev, handle);
5480 }
5481
5482 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5483 void *data, u16 len)
5484 {
5485 struct mgmt_pending_cmd *cmd;
5486 int err, status;
5487
5488 hci_dev_lock(hdev);
5489
5490 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5491 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5492 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5493 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5494 status = MGMT_STATUS_BUSY;
5495 goto unlock;
5496 }
5497
5498 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5499 if (!cmd) {
5500 status = MGMT_STATUS_NO_RESOURCES;
5501 goto unlock;
5502 }
5503
5504 err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
5505 mgmt_remove_adv_monitor_complete);
5506
5507 if (err) {
5508 mgmt_pending_remove(cmd);
5509
5510 if (err == -ENOMEM)
5511 status = MGMT_STATUS_NO_RESOURCES;
5512 else
5513 status = MGMT_STATUS_FAILED;
5514
5515 goto unlock;
5516 }
5517
5518 hci_dev_unlock(hdev);
5519
5520 return 0;
5521
5522 unlock:
5523 hci_dev_unlock(hdev);
5524 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5525 status);
5526 }
5527
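/* Complete Read Local OOB Data: when Secure Connections is not
 * enabled the controller only returns the P-192 hash and randomizer,
 * so the response is shrunk to exclude the unused 256-bit fields.
 */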
5528 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5529 {
5530 struct mgmt_rp_read_local_oob_data mgmt_rp;
5531 size_t rp_size = sizeof(mgmt_rp);
5532 struct mgmt_pending_cmd *cmd = data;
5533 struct sk_buff *skb = cmd->skb;
5534 u8 status = mgmt_status(err);
5535
5536 if (!status) {
5537 if (!skb)
5538 status = MGMT_STATUS_FAILED;
5539 else if (IS_ERR(skb))
5540 status = mgmt_status(PTR_ERR(skb));
5541 else
5542 status = mgmt_status(skb->data[0]);
5543 }
5544
5545 bt_dev_dbg(hdev, "status %d", status);
5546
5547 if (status) {
5548 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5549 goto remove;
5550 }
5551
5552 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5553
5554 if (!bredr_sc_enabled(hdev)) {
5555 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5556
5557 if (skb->len < sizeof(*rp)) {
5558 mgmt_cmd_status(cmd->sk, hdev->id,
5559 MGMT_OP_READ_LOCAL_OOB_DATA,
5560 MGMT_STATUS_FAILED);
5561 goto remove;
5562 }
5563
5564 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5565 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5566
5567 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5568 } else {
5569 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5570
5571 if (skb->len < sizeof(*rp)) {
5572 mgmt_cmd_status(cmd->sk, hdev->id,
5573 MGMT_OP_READ_LOCAL_OOB_DATA,
5574 MGMT_STATUS_FAILED);
5575 goto remove;
5576 }
5577
5578 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5579 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5580
5581 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5582 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5583 }
5584
5585 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5586 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5587
5588 remove:
5589 if (skb && !IS_ERR(skb))
5590 kfree_skb(skb);
5591
5592 mgmt_pending_free(cmd);
5593 }
5594
5595 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5596 {
5597 struct mgmt_pending_cmd *cmd = data;
5598
5599 if (bredr_sc_enabled(hdev))
5600 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5601 else
5602 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5603
5604 if (IS_ERR(cmd->skb))
5605 return PTR_ERR(cmd->skb);
5606 else
5607 return 0;
5608 }
5609
5610 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5611 void *data, u16 data_len)
5612 {
5613 struct mgmt_pending_cmd *cmd;
5614 int err;
5615
5616 bt_dev_dbg(hdev, "sock %p", sk);
5617
5618 hci_dev_lock(hdev);
5619
5620 if (!hdev_is_powered(hdev)) {
5621 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5622 MGMT_STATUS_NOT_POWERED);
5623 goto unlock;
5624 }
5625
5626 if (!lmp_ssp_capable(hdev)) {
5627 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5628 MGMT_STATUS_NOT_SUPPORTED);
5629 goto unlock;
5630 }
5631
5632 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5633 if (!cmd)
5634 err = -ENOMEM;
5635 else
5636 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5637 read_local_oob_data_complete);
5638
5639 if (err < 0) {
5640 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5641 MGMT_STATUS_FAILED);
5642
5643 if (cmd)
5644 mgmt_pending_free(cmd);
5645 }
5646
5647 unlock:
5648 hci_dev_unlock(hdev);
5649 return err;
5650 }
5651
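/* Handler for MGMT_OP_ADD_REMOTE_OOB_DATA. Two fixed command sizes are
 * accepted: the legacy form carrying only the P-192 hash/randomizer
 * (BR/EDR only) and the extended form carrying both P-192 and P-256
 * values. An all-zero hash or randomizer is treated as "no data" and
 * disables OOB for that key strength instead of storing zero keys.
 */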
5652 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5653 void *data, u16 len)
5654 {
5655 struct mgmt_addr_info *addr = data;
5656 int err;
5657
5658 bt_dev_dbg(hdev, "sock %p", sk);
5659
5660 if (!bdaddr_type_is_valid(addr->type))
5661 return mgmt_cmd_complete(sk, hdev->id,
5662 MGMT_OP_ADD_REMOTE_OOB_DATA,
5663 MGMT_STATUS_INVALID_PARAMS,
5664 addr, sizeof(*addr));
5665
5666 hci_dev_lock(hdev);
5667
5668 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5669 struct mgmt_cp_add_remote_oob_data *cp = data;
5670 u8 status;
5671
5672 if (cp->addr.type != BDADDR_BREDR) {
5673 err = mgmt_cmd_complete(sk, hdev->id,
5674 MGMT_OP_ADD_REMOTE_OOB_DATA,
5675 MGMT_STATUS_INVALID_PARAMS,
5676 &cp->addr, sizeof(cp->addr));
5677 goto unlock;
5678 }
5679
5680 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5681 cp->addr.type, cp->hash,
5682 cp->rand, NULL, NULL);
5683 if (err < 0)
5684 status = MGMT_STATUS_FAILED;
5685 else
5686 status = MGMT_STATUS_SUCCESS;
5687
5688 err = mgmt_cmd_complete(sk, hdev->id,
5689 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5690 &cp->addr, sizeof(cp->addr));
5691 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5692 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5693 u8 *rand192, *hash192, *rand256, *hash256;
5694 u8 status;
5695
5696 if (bdaddr_type_is_le(cp->addr.type)) {
5697 /* Enforce zero-valued 192-bit parameters as
5698 * long as legacy SMP OOB isn't implemented.
5699 */
5700 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5701 memcmp(cp->hash192, ZERO_KEY, 16)) {
5702 err = mgmt_cmd_complete(sk, hdev->id,
5703 MGMT_OP_ADD_REMOTE_OOB_DATA,
5704 MGMT_STATUS_INVALID_PARAMS,
5705 addr, sizeof(*addr));
5706 goto unlock;
5707 }
5708
5709 rand192 = NULL;
5710 hash192 = NULL;
5711 } else {
5712 /* In case one of the P-192 values is set to zero,
5713 * then just disable OOB data for P-192.
5714 */
5715 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5716 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5717 rand192 = NULL;
5718 hash192 = NULL;
5719 } else {
5720 rand192 = cp->rand192;
5721 hash192 = cp->hash192;
5722 }
5723 }
5724
5725 /* In case one of the P-256 values is set to zero, then just
5726 * disable OOB data for P-256.
5727 */
5728 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5729 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5730 rand256 = NULL;
5731 hash256 = NULL;
5732 } else {
5733 rand256 = cp->rand256;
5734 hash256 = cp->hash256;
5735 }
5736
5737 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5738 cp->addr.type, hash192, rand192,
5739 hash256, rand256);
5740 if (err < 0)
5741 status = MGMT_STATUS_FAILED;
5742 else
5743 status = MGMT_STATUS_SUCCESS;
5744
5745 err = mgmt_cmd_complete(sk, hdev->id,
5746 MGMT_OP_ADD_REMOTE_OOB_DATA,
5747 status, &cp->addr, sizeof(cp->addr));
5748 } else {
5749 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5750 len);
5751 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5752 MGMT_STATUS_INVALID_PARAMS);
5753 }
5754
5755 unlock:
5756 hci_dev_unlock(hdev);
5757 return err;
5758 }
5759
5760 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5761 void *data, u16 len)
5762 {
5763 struct mgmt_cp_remove_remote_oob_data *cp = data;
5764 u8 status;
5765 int err;
5766
5767 bt_dev_dbg(hdev, "sock %p", sk);
5768
5769 if (cp->addr.type != BDADDR_BREDR)
5770 return mgmt_cmd_complete(sk, hdev->id,
5771 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5772 MGMT_STATUS_INVALID_PARAMS,
5773 &cp->addr, sizeof(cp->addr));
5774
5775 hci_dev_lock(hdev);
5776
5777 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5778 hci_remote_oob_data_clear(hdev);
5779 status = MGMT_STATUS_SUCCESS;
5780 goto done;
5781 }
5782
5783 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5784 if (err < 0)
5785 status = MGMT_STATUS_INVALID_PARAMS;
5786 else
5787 status = MGMT_STATUS_SUCCESS;
5788
5789 done:
5790 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5791 status, &cp->addr, sizeof(cp->addr));
5792
5793 hci_dev_unlock(hdev);
5794 return err;
5795 }
5796
5797 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5798 {
5799 struct mgmt_pending_cmd *cmd;
5800
5801 bt_dev_dbg(hdev, "status %u", status);
5802
5803 hci_dev_lock(hdev);
5804
5805 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5806 if (!cmd)
5807 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5808
5809 if (!cmd)
5810 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5811
5812 if (cmd) {
5813 cmd->cmd_complete(cmd, mgmt_status(status));
5814 mgmt_pending_remove(cmd);
5815 }
5816
5817 hci_dev_unlock(hdev);
5818 }
5819
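/* Validate a requested discovery type against the controller's enabled
 * transports: DISCOV_TYPE_LE requires LE support, DISCOV_TYPE_BREDR
 * requires BR/EDR support, and DISCOV_TYPE_INTERLEAVED requires both
 * (hence the fallthrough below). On failure *mgmt_status carries the
 * reason to report back to user space.
 */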
5820 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5821 uint8_t *mgmt_status)
5822 {
5823 switch (type) {
5824 case DISCOV_TYPE_LE:
5825 *mgmt_status = mgmt_le_support(hdev);
5826 if (*mgmt_status)
5827 return false;
5828 break;
5829 case DISCOV_TYPE_INTERLEAVED:
5830 *mgmt_status = mgmt_le_support(hdev);
5831 if (*mgmt_status)
5832 return false;
5833 fallthrough;
5834 case DISCOV_TYPE_BREDR:
5835 *mgmt_status = mgmt_bredr_support(hdev);
5836 if (*mgmt_status)
5837 return false;
5838 break;
5839 default:
5840 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5841 return false;
5842 }
5843
5844 return true;
5845 }
5846
5847 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5848 {
5849 struct mgmt_pending_cmd *cmd = data;
5850
5851 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5852 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5853 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5854 return;
5855
5856 bt_dev_dbg(hdev, "err %d", err);
5857
5858 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5859 cmd->param, 1);
5860 mgmt_pending_remove(cmd);
5861
5862 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
5863 DISCOVERY_FINDING);
5864 }
5865
5866 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5867 {
5868 return hci_start_discovery_sync(hdev);
5869 }
5870
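/* Common implementation behind Start Discovery and Start Limited
 * Discovery (Start Service Discovery has its own handler below).
 * Discovery may only be started from DISCOVERY_STOPPED and while not
 * paused; on success the state moves to DISCOVERY_STARTING and
 * start_discovery_complete() advances it to DISCOVERY_FINDING, or back
 * to DISCOVERY_STOPPED on error.
 */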
5871 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5872 u16 op, void *data, u16 len)
5873 {
5874 struct mgmt_cp_start_discovery *cp = data;
5875 struct mgmt_pending_cmd *cmd;
5876 u8 status;
5877 int err;
5878
5879 bt_dev_dbg(hdev, "sock %p", sk);
5880
5881 hci_dev_lock(hdev);
5882
5883 if (!hdev_is_powered(hdev)) {
5884 err = mgmt_cmd_complete(sk, hdev->id, op,
5885 MGMT_STATUS_NOT_POWERED,
5886 &cp->type, sizeof(cp->type));
5887 goto failed;
5888 }
5889
5890 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5891 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5892 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5893 &cp->type, sizeof(cp->type));
5894 goto failed;
5895 }
5896
5897 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5898 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5899 &cp->type, sizeof(cp->type));
5900 goto failed;
5901 }
5902
5903 /* Can't start discovery when it is paused */
5904 if (hdev->discovery_paused) {
5905 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5906 &cp->type, sizeof(cp->type));
5907 goto failed;
5908 }
5909
5910 /* Clear the discovery filter first to free any previously
5911 * allocated memory for the UUID list.
5912 */
5913 hci_discovery_filter_clear(hdev);
5914
5915 hdev->discovery.type = cp->type;
5916 hdev->discovery.report_invalid_rssi = false;
5917 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5918 hdev->discovery.limited = true;
5919 else
5920 hdev->discovery.limited = false;
5921
5922 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5923 if (!cmd) {
5924 err = -ENOMEM;
5925 goto failed;
5926 }
5927
5928 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5929 start_discovery_complete);
5930 if (err < 0) {
5931 mgmt_pending_remove(cmd);
5932 goto failed;
5933 }
5934
5935 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5936
5937 failed:
5938 hci_dev_unlock(hdev);
5939 return err;
5940 }
5941
5942 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5943 void *data, u16 len)
5944 {
5945 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5946 data, len);
5947 }
5948
5949 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5950 void *data, u16 len)
5951 {
5952 return start_discovery_internal(sk, hdev,
5953 MGMT_OP_START_LIMITED_DISCOVERY,
5954 data, len);
5955 }
5956
5957 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5958 void *data, u16 len)
5959 {
5960 struct mgmt_cp_start_service_discovery *cp = data;
5961 struct mgmt_pending_cmd *cmd;
5962 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5963 u16 uuid_count, expected_len;
5964 u8 status;
5965 int err;
5966
5967 bt_dev_dbg(hdev, "sock %p", sk);
5968
5969 hci_dev_lock(hdev);
5970
5971 if (!hdev_is_powered(hdev)) {
5972 err = mgmt_cmd_complete(sk, hdev->id,
5973 MGMT_OP_START_SERVICE_DISCOVERY,
5974 MGMT_STATUS_NOT_POWERED,
5975 &cp->type, sizeof(cp->type));
5976 goto failed;
5977 }
5978
5979 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5980 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5981 err = mgmt_cmd_complete(sk, hdev->id,
5982 MGMT_OP_START_SERVICE_DISCOVERY,
5983 MGMT_STATUS_BUSY, &cp->type,
5984 sizeof(cp->type));
5985 goto failed;
5986 }
5987
5988 if (hdev->discovery_paused) {
5989 err = mgmt_cmd_complete(sk, hdev->id,
5990 MGMT_OP_START_SERVICE_DISCOVERY,
5991 MGMT_STATUS_BUSY, &cp->type,
5992 sizeof(cp->type));
5993 goto failed;
5994 }
5995
5996 uuid_count = __le16_to_cpu(cp->uuid_count);
5997 if (uuid_count > max_uuid_count) {
5998 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5999 uuid_count);
6000 err = mgmt_cmd_complete(sk, hdev->id,
6001 MGMT_OP_START_SERVICE_DISCOVERY,
6002 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6003 sizeof(cp->type));
6004 goto failed;
6005 }
6006
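/* Each filter UUID is transported as a full 128-bit (16 byte) value, so
 * the total command length must be the fixed header plus 16 bytes per
 * UUID. For example, a hypothetical request with uuid_count == 2 has to
 * be exactly sizeof(*cp) + 32 bytes long; anything else is rejected.
 */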
6007 expected_len = sizeof(*cp) + uuid_count * 16;
6008 if (expected_len != len) {
6009 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
6010 expected_len, len);
6011 err = mgmt_cmd_complete(sk, hdev->id,
6012 MGMT_OP_START_SERVICE_DISCOVERY,
6013 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6014 sizeof(cp->type));
6015 goto failed;
6016 }
6017
6018 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6019 err = mgmt_cmd_complete(sk, hdev->id,
6020 MGMT_OP_START_SERVICE_DISCOVERY,
6021 status, &cp->type, sizeof(cp->type));
6022 goto failed;
6023 }
6024
6025 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6026 hdev, data, len);
6027 if (!cmd) {
6028 err = -ENOMEM;
6029 goto failed;
6030 }
6031
6032 /* Clear the discovery filter first to free any previously
6033 * allocated memory for the UUID list.
6034 */
6035 hci_discovery_filter_clear(hdev);
6036
6037 hdev->discovery.result_filtering = true;
6038 hdev->discovery.type = cp->type;
6039 hdev->discovery.rssi = cp->rssi;
6040 hdev->discovery.uuid_count = uuid_count;
6041
6042 if (uuid_count > 0) {
6043 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6044 GFP_KERNEL);
6045 if (!hdev->discovery.uuids) {
6046 err = mgmt_cmd_complete(sk, hdev->id,
6047 MGMT_OP_START_SERVICE_DISCOVERY,
6048 MGMT_STATUS_FAILED,
6049 &cp->type, sizeof(cp->type));
6050 mgmt_pending_remove(cmd);
6051 goto failed;
6052 }
6053 }
6054
6055 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6056 start_discovery_complete);
6057 if (err < 0) {
6058 mgmt_pending_remove(cmd);
6059 goto failed;
6060 }
6061
6062 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6063
6064 failed:
6065 hci_dev_unlock(hdev);
6066 return err;
6067 }
6068
6069 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6070 {
6071 struct mgmt_pending_cmd *cmd;
6072
6073 bt_dev_dbg(hdev, "status %u", status);
6074
6075 hci_dev_lock(hdev);
6076
6077 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6078 if (cmd) {
6079 cmd->cmd_complete(cmd, mgmt_status(status));
6080 mgmt_pending_remove(cmd);
6081 }
6082
6083 hci_dev_unlock(hdev);
6084 }
6085
6086 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6087 {
6088 struct mgmt_pending_cmd *cmd = data;
6089
6090 if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6091 return;
6092
6093 bt_dev_dbg(hdev, "err %d", err);
6094
6095 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6096 cmd->param, 1);
6097 mgmt_pending_remove(cmd);
6098
6099 if (!err)
6100 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6101 }
6102
6103 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6104 {
6105 return hci_stop_discovery_sync(hdev);
6106 }
6107
6108 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6109 u16 len)
6110 {
6111 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6112 struct mgmt_pending_cmd *cmd;
6113 int err;
6114
6115 bt_dev_dbg(hdev, "sock %p", sk);
6116
6117 hci_dev_lock(hdev);
6118
6119 if (!hci_discovery_active(hdev)) {
6120 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6121 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6122 sizeof(mgmt_cp->type));
6123 goto unlock;
6124 }
6125
6126 if (hdev->discovery.type != mgmt_cp->type) {
6127 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6128 MGMT_STATUS_INVALID_PARAMS,
6129 &mgmt_cp->type, sizeof(mgmt_cp->type));
6130 goto unlock;
6131 }
6132
6133 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6134 if (!cmd) {
6135 err = -ENOMEM;
6136 goto unlock;
6137 }
6138
6139 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6140 stop_discovery_complete);
6141 if (err < 0) {
6142 mgmt_pending_remove(cmd);
6143 goto unlock;
6144 }
6145
6146 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6147
6148 unlock:
6149 hci_dev_unlock(hdev);
6150 return err;
6151 }
6152
6153 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6154 u16 len)
6155 {
6156 struct mgmt_cp_confirm_name *cp = data;
6157 struct inquiry_entry *e;
6158 int err;
6159
6160 bt_dev_dbg(hdev, "sock %p", sk);
6161
6162 hci_dev_lock(hdev);
6163
6164 if (!hci_discovery_active(hdev)) {
6165 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6166 MGMT_STATUS_FAILED, &cp->addr,
6167 sizeof(cp->addr));
6168 goto failed;
6169 }
6170
6171 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6172 if (!e) {
6173 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6174 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6175 sizeof(cp->addr));
6176 goto failed;
6177 }
6178
6179 if (cp->name_known) {
6180 e->name_state = NAME_KNOWN;
6181 list_del(&e->list);
6182 } else {
6183 e->name_state = NAME_NEEDED;
6184 hci_inquiry_cache_update_resolve(hdev, e);
6185 }
6186
6187 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6188 &cp->addr, sizeof(cp->addr));
6189
6190 failed:
6191 hci_dev_unlock(hdev);
6192 return err;
6193 }
6194
6195 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6196 u16 len)
6197 {
6198 struct mgmt_cp_block_device *cp = data;
6199 u8 status;
6200 int err;
6201
6202 bt_dev_dbg(hdev, "sock %p", sk);
6203
6204 if (!bdaddr_type_is_valid(cp->addr.type))
6205 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6206 MGMT_STATUS_INVALID_PARAMS,
6207 &cp->addr, sizeof(cp->addr));
6208
6209 hci_dev_lock(hdev);
6210
6211 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6212 cp->addr.type);
6213 if (err < 0) {
6214 status = MGMT_STATUS_FAILED;
6215 goto done;
6216 }
6217
6218 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6219 sk);
6220 status = MGMT_STATUS_SUCCESS;
6221
6222 done:
6223 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6224 &cp->addr, sizeof(cp->addr));
6225
6226 hci_dev_unlock(hdev);
6227
6228 return err;
6229 }
6230
6231 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6232 u16 len)
6233 {
6234 struct mgmt_cp_unblock_device *cp = data;
6235 u8 status;
6236 int err;
6237
6238 bt_dev_dbg(hdev, "sock %p", sk);
6239
6240 if (!bdaddr_type_is_valid(cp->addr.type))
6241 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6242 MGMT_STATUS_INVALID_PARAMS,
6243 &cp->addr, sizeof(cp->addr));
6244
6245 hci_dev_lock(hdev);
6246
6247 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6248 cp->addr.type);
6249 if (err < 0) {
6250 status = MGMT_STATUS_INVALID_PARAMS;
6251 goto done;
6252 }
6253
6254 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6255 sk);
6256 status = MGMT_STATUS_SUCCESS;
6257
6258 done:
6259 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6260 &cp->addr, sizeof(cp->addr));
6261
6262 hci_dev_unlock(hdev);
6263
6264 return err;
6265 }
6266
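/* Handlers for MGMT_OP_SET_DEVICE_ID. The source field follows the
 * Device ID profile assignment, where 0x0000 disables the record and
 * 0x0001/0x0002 are understood to select the Bluetooth SIG and USB
 * Implementer's Forum vendor ID namespaces (the exact meaning lives in
 * the profile spec, not here); anything above 0x0002 is rejected. The
 * updated record is then pushed into the EIR data via the cmd_sync
 * queue.
 */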
6267 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6268 {
6269 return hci_update_eir_sync(hdev);
6270 }
6271
6272 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6273 u16 len)
6274 {
6275 struct mgmt_cp_set_device_id *cp = data;
6276 int err;
6277 __u16 source;
6278
6279 bt_dev_dbg(hdev, "sock %p", sk);
6280
6281 source = __le16_to_cpu(cp->source);
6282
6283 if (source > 0x0002)
6284 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6285 MGMT_STATUS_INVALID_PARAMS);
6286
6287 hci_dev_lock(hdev);
6288
6289 hdev->devid_source = source;
6290 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6291 hdev->devid_product = __le16_to_cpu(cp->product);
6292 hdev->devid_version = __le16_to_cpu(cp->version);
6293
6294 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6295 NULL, 0);
6296
6297 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6298
6299 hci_dev_unlock(hdev);
6300
6301 return err;
6302 }
6303
6304 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6305 {
6306 if (err)
6307 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6308 else
6309 bt_dev_dbg(hdev, "status %d", err);
6310 }
6311
6312 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6313 {
6314 struct cmd_lookup match = { NULL, hdev };
6315 u8 instance;
6316 struct adv_info *adv_instance;
6317 u8 status = mgmt_status(err);
6318
6319 if (status) {
6320 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6321 cmd_status_rsp, &status);
6322 return;
6323 }
6324
6325 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6326 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6327 else
6328 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6329
6330 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6331 &match);
6332
6333 new_settings(hdev, match.sk);
6334
6335 if (match.sk)
6336 sock_put(match.sk);
6337
6338 /* If "Set Advertising" was just disabled and instance advertising was
6339 * set up earlier, then re-enable multi-instance advertising.
6340 */
6341 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6342 list_empty(&hdev->adv_instances))
6343 return;
6344
6345 instance = hdev->cur_adv_instance;
6346 if (!instance) {
6347 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6348 struct adv_info, list);
6349 if (!adv_instance)
6350 return;
6351
6352 instance = adv_instance->instance;
6353 }
6354
6355 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6356
6357 enable_advertising_instance(hdev, err);
6358 }
6359
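/* cmd_sync work for Set Advertising. A cp->val of 0x00 disables
 * advertising, 0x01 enables it and 0x02 enables it as connectable; the
 * connectable variant is tracked via HCI_ADVERTISING_CONNECTABLE.
 * Instance 0x00 is the dedicated "Set Advertising" instance, as opposed
 * to instances registered through Add Advertising.
 */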
6360 static int set_adv_sync(struct hci_dev *hdev, void *data)
6361 {
6362 struct mgmt_pending_cmd *cmd = data;
6363 struct mgmt_mode *cp = cmd->param;
6364 u8 val = !!cp->val;
6365
6366 if (cp->val == 0x02)
6367 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6368 else
6369 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6370
6371 cancel_adv_timeout(hdev);
6372
6373 if (val) {
6374 /* Switch to instance "0" for the Set Advertising setting.
6375 * We cannot use update_[adv|scan_rsp]_data() here as the
6376 * HCI_ADVERTISING flag is not yet set.
6377 */
6378 hdev->cur_adv_instance = 0x00;
6379
6380 if (ext_adv_capable(hdev)) {
6381 hci_start_ext_adv_sync(hdev, 0x00);
6382 } else {
6383 hci_update_adv_data_sync(hdev, 0x00);
6384 hci_update_scan_rsp_data_sync(hdev, 0x00);
6385 hci_enable_advertising_sync(hdev);
6386 }
6387 } else {
6388 hci_disable_advertising_sync(hdev);
6389 }
6390
6391 return 0;
6392 }
6393
6394 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6395 u16 len)
6396 {
6397 struct mgmt_mode *cp = data;
6398 struct mgmt_pending_cmd *cmd;
6399 u8 val, status;
6400 int err;
6401
6402 bt_dev_dbg(hdev, "sock %p", sk);
6403
6404 status = mgmt_le_support(hdev);
6405 if (status)
6406 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6407 status);
6408
6409 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6410 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6411 MGMT_STATUS_INVALID_PARAMS);
6412
6413 if (hdev->advertising_paused)
6414 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6415 MGMT_STATUS_BUSY);
6416
6417 hci_dev_lock(hdev);
6418
6419 val = !!cp->val;
6420
6421 /* The following conditions are ones which mean that we should
6422 * not do any HCI communication but directly send a mgmt
6423 * response to user space (after toggling the flag if
6424 * necessary).
6425 */
6426 if (!hdev_is_powered(hdev) ||
6427 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6428 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6429 hci_dev_test_flag(hdev, HCI_MESH) ||
6430 hci_conn_num(hdev, LE_LINK) > 0 ||
6431 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6432 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6433 bool changed;
6434
6435 if (cp->val) {
6436 hdev->cur_adv_instance = 0x00;
6437 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6438 if (cp->val == 0x02)
6439 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6440 else
6441 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6442 } else {
6443 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6444 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6445 }
6446
6447 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6448 if (err < 0)
6449 goto unlock;
6450
6451 if (changed)
6452 err = new_settings(hdev, sk);
6453
6454 goto unlock;
6455 }
6456
6457 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6458 pending_find(MGMT_OP_SET_LE, hdev)) {
6459 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6460 MGMT_STATUS_BUSY);
6461 goto unlock;
6462 }
6463
6464 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6465 if (!cmd)
6466 err = -ENOMEM;
6467 else
6468 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6469 set_advertising_complete);
6470
6471 if (err < 0 && cmd)
6472 mgmt_pending_remove(cmd);
6473
6474 unlock:
6475 hci_dev_unlock(hdev);
6476 return err;
6477 }
6478
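/* Handler for MGMT_OP_SET_STATIC_ADDRESS. A random static address must
 * have its two most significant bits set, so for instance (a made-up
 * value) C0:12:34:56:78:9A passes the check below while
 * 40:12:34:56:78:9A does not. BDADDR_ANY clears the address again, and
 * the setting is only accepted while the controller is powered off.
 */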
6479 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6480 void *data, u16 len)
6481 {
6482 struct mgmt_cp_set_static_address *cp = data;
6483 int err;
6484
6485 bt_dev_dbg(hdev, "sock %p", sk);
6486
6487 if (!lmp_le_capable(hdev))
6488 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6489 MGMT_STATUS_NOT_SUPPORTED);
6490
6491 if (hdev_is_powered(hdev))
6492 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6493 MGMT_STATUS_REJECTED);
6494
6495 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6496 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6497 return mgmt_cmd_status(sk, hdev->id,
6498 MGMT_OP_SET_STATIC_ADDRESS,
6499 MGMT_STATUS_INVALID_PARAMS);
6500
6501 /* Two most significant bits shall be set */
6502 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6503 return mgmt_cmd_status(sk, hdev->id,
6504 MGMT_OP_SET_STATIC_ADDRESS,
6505 MGMT_STATUS_INVALID_PARAMS);
6506 }
6507
6508 hci_dev_lock(hdev);
6509
6510 bacpy(&hdev->static_addr, &cp->bdaddr);
6511
6512 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6513 if (err < 0)
6514 goto unlock;
6515
6516 err = new_settings(hdev, sk);
6517
6518 unlock:
6519 hci_dev_unlock(hdev);
6520 return err;
6521 }
6522
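/* Handler for MGMT_OP_SET_SCAN_PARAMS. Interval and window use the
 * standard LE scan units of 0.625 ms, so the accepted range of
 * 0x0004-0x4000 corresponds to 2.5 ms - 10.24 s; e.g. an interval of
 * 0x0010 means 16 * 0.625 = 10 ms. The window must not exceed the
 * interval.
 */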
6523 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6524 void *data, u16 len)
6525 {
6526 struct mgmt_cp_set_scan_params *cp = data;
6527 __u16 interval, window;
6528 int err;
6529
6530 bt_dev_dbg(hdev, "sock %p", sk);
6531
6532 if (!lmp_le_capable(hdev))
6533 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6534 MGMT_STATUS_NOT_SUPPORTED);
6535
6536 interval = __le16_to_cpu(cp->interval);
6537
6538 if (interval < 0x0004 || interval > 0x4000)
6539 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6540 MGMT_STATUS_INVALID_PARAMS);
6541
6542 window = __le16_to_cpu(cp->window);
6543
6544 if (window < 0x0004 || window > 0x4000)
6545 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6546 MGMT_STATUS_INVALID_PARAMS);
6547
6548 if (window > interval)
6549 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6550 MGMT_STATUS_INVALID_PARAMS);
6551
6552 hci_dev_lock(hdev);
6553
6554 hdev->le_scan_interval = interval;
6555 hdev->le_scan_window = window;
6556
6557 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6558 NULL, 0);
6559
6560 /* If background scan is running, restart it so new parameters are
6561 * loaded.
6562 */
6563 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6564 hdev->discovery.state == DISCOVERY_STOPPED)
6565 hci_update_passive_scan(hdev);
6566
6567 hci_dev_unlock(hdev);
6568
6569 return err;
6570 }
6571
6572 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6573 {
6574 struct mgmt_pending_cmd *cmd = data;
6575
6576 bt_dev_dbg(hdev, "err %d", err);
6577
6578 if (err) {
6579 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6580 mgmt_status(err));
6581 } else {
6582 struct mgmt_mode *cp = cmd->param;
6583
6584 if (cp->val)
6585 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6586 else
6587 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6588
6589 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6590 new_settings(hdev, cmd->sk);
6591 }
6592
6593 mgmt_pending_free(cmd);
6594 }
6595
6596 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6597 {
6598 struct mgmt_pending_cmd *cmd = data;
6599 struct mgmt_mode *cp = cmd->param;
6600
6601 return hci_write_fast_connectable_sync(hdev, cp->val);
6602 }
6603
6604 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6605 void *data, u16 len)
6606 {
6607 struct mgmt_mode *cp = data;
6608 struct mgmt_pending_cmd *cmd;
6609 int err;
6610
6611 bt_dev_dbg(hdev, "sock %p", sk);
6612
6613 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6614 hdev->hci_ver < BLUETOOTH_VER_1_2)
6615 return mgmt_cmd_status(sk, hdev->id,
6616 MGMT_OP_SET_FAST_CONNECTABLE,
6617 MGMT_STATUS_NOT_SUPPORTED);
6618
6619 if (cp->val != 0x00 && cp->val != 0x01)
6620 return mgmt_cmd_status(sk, hdev->id,
6621 MGMT_OP_SET_FAST_CONNECTABLE,
6622 MGMT_STATUS_INVALID_PARAMS);
6623
6624 hci_dev_lock(hdev);
6625
6626 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6627 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6628 goto unlock;
6629 }
6630
6631 if (!hdev_is_powered(hdev)) {
6632 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6633 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6634 new_settings(hdev, sk);
6635 goto unlock;
6636 }
6637
6638 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6639 len);
6640 if (!cmd)
6641 err = -ENOMEM;
6642 else
6643 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6644 fast_connectable_complete);
6645
6646 if (err < 0) {
6647 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6648 MGMT_STATUS_FAILED);
6649
6650 if (cmd)
6651 mgmt_pending_free(cmd);
6652 }
6653
6654 unlock:
6655 hci_dev_unlock(hdev);
6656
6657 return err;
6658 }
6659
6660 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6661 {
6662 struct mgmt_pending_cmd *cmd = data;
6663
6664 bt_dev_dbg(hdev, "err %d", err);
6665
6666 if (err) {
6667 u8 mgmt_err = mgmt_status(err);
6668
6669 /* We need to restore the flag if related HCI commands
6670 * failed.
6671 */
6672 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6673
6674 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6675 } else {
6676 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6677 new_settings(hdev, cmd->sk);
6678 }
6679
6680 mgmt_pending_free(cmd);
6681 }
6682
6683 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6684 {
6685 int status;
6686
6687 status = hci_write_fast_connectable_sync(hdev, false);
6688
6689 if (!status)
6690 status = hci_update_scan_sync(hdev);
6691
6692 /* Since only the advertising data flags will change, there
6693 * is no need to update the scan response data.
6694 */
6695 if (!status)
6696 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6697
6698 return status;
6699 }
6700
6701 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6702 {
6703 struct mgmt_mode *cp = data;
6704 struct mgmt_pending_cmd *cmd;
6705 int err;
6706
6707 bt_dev_dbg(hdev, "sock %p", sk);
6708
6709 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6710 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6711 MGMT_STATUS_NOT_SUPPORTED);
6712
6713 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6714 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6715 MGMT_STATUS_REJECTED);
6716
6717 if (cp->val != 0x00 && cp->val != 0x01)
6718 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6719 MGMT_STATUS_INVALID_PARAMS);
6720
6721 hci_dev_lock(hdev);
6722
6723 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6724 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6725 goto unlock;
6726 }
6727
6728 if (!hdev_is_powered(hdev)) {
6729 if (!cp->val) {
6730 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6731 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6732 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6733 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6734 }
6735
6736 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6737
6738 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6739 if (err < 0)
6740 goto unlock;
6741
6742 err = new_settings(hdev, sk);
6743 goto unlock;
6744 }
6745
6746 /* Reject disabling when powered on */
6747 if (!cp->val) {
6748 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6749 MGMT_STATUS_REJECTED);
6750 goto unlock;
6751 } else {
6752 /* When a dual-mode controller has been configured to operate
6753 * with LE only and is using a static address, switching
6754 * BR/EDR back on is not allowed.
6755 *
6756 * Dual-mode controllers shall operate with the public
6757 * address as their identity address for BR/EDR and LE. So
6758 * reject the attempt to create an invalid configuration.
6759 *
6760 * The same restrictions apply when secure connections
6761 * have been enabled. For BR/EDR this is a controller feature
6762 * while for LE it is a host stack feature. This means that
6763 * switching BR/EDR back on when secure connections have been
6764 * enabled is not a supported transaction.
6765 */
6766 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6767 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6768 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6769 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6770 MGMT_STATUS_REJECTED);
6771 goto unlock;
6772 }
6773 }
6774
6775 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6776 if (!cmd)
6777 err = -ENOMEM;
6778 else
6779 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6780 set_bredr_complete);
6781
6782 if (err < 0) {
6783 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6784 MGMT_STATUS_FAILED);
6785 if (cmd)
6786 mgmt_pending_free(cmd);
6787
6788 goto unlock;
6789 }
6790
6791 /* We need to flip the bit already here so that
6792 * hci_req_update_adv_data generates the correct flags.
6793 */
6794 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6795
6796 unlock:
6797 hci_dev_unlock(hdev);
6798 return err;
6799 }
6800
6801 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6802 {
6803 struct mgmt_pending_cmd *cmd = data;
6804 struct mgmt_mode *cp;
6805
6806 bt_dev_dbg(hdev, "err %d", err);
6807
6808 if (err) {
6809 u8 mgmt_err = mgmt_status(err);
6810
6811 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6812 goto done;
6813 }
6814
6815 cp = cmd->param;
6816
6817 switch (cp->val) {
6818 case 0x00:
6819 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6820 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6821 break;
6822 case 0x01:
6823 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6824 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6825 break;
6826 case 0x02:
6827 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6828 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6829 break;
6830 }
6831
6832 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6833 new_settings(hdev, cmd->sk);
6834
6835 done:
6836 mgmt_pending_free(cmd);
6837 }
6838
6839 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6840 {
6841 struct mgmt_pending_cmd *cmd = data;
6842 struct mgmt_mode *cp = cmd->param;
6843 u8 val = !!cp->val;
6844
6845 /* Force write of val */
6846 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6847
6848 return hci_write_sc_support_sync(hdev, val);
6849 }
6850
6851 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6852 void *data, u16 len)
6853 {
6854 struct mgmt_mode *cp = data;
6855 struct mgmt_pending_cmd *cmd;
6856 u8 val;
6857 int err;
6858
6859 bt_dev_dbg(hdev, "sock %p", sk);
6860
6861 if (!lmp_sc_capable(hdev) &&
6862 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6863 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6864 MGMT_STATUS_NOT_SUPPORTED);
6865
6866 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6867 lmp_sc_capable(hdev) &&
6868 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6869 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6870 MGMT_STATUS_REJECTED);
6871
6872 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6873 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6874 MGMT_STATUS_INVALID_PARAMS);
6875
6876 hci_dev_lock(hdev);
6877
6878 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6879 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6880 bool changed;
6881
6882 if (cp->val) {
6883 changed = !hci_dev_test_and_set_flag(hdev,
6884 HCI_SC_ENABLED);
6885 if (cp->val == 0x02)
6886 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6887 else
6888 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6889 } else {
6890 changed = hci_dev_test_and_clear_flag(hdev,
6891 HCI_SC_ENABLED);
6892 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6893 }
6894
6895 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6896 if (err < 0)
6897 goto failed;
6898
6899 if (changed)
6900 err = new_settings(hdev, sk);
6901
6902 goto failed;
6903 }
6904
6905 val = !!cp->val;
6906
6907 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6908 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6909 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6910 goto failed;
6911 }
6912
6913 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6914 if (!cmd)
6915 err = -ENOMEM;
6916 else
6917 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6918 set_secure_conn_complete);
6919
6920 if (err < 0) {
6921 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6922 MGMT_STATUS_FAILED);
6923 if (cmd)
6924 mgmt_pending_free(cmd);
6925 }
6926
6927 failed:
6928 hci_dev_unlock(hdev);
6929 return err;
6930 }
6931
6932 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6933 void *data, u16 len)
6934 {
6935 struct mgmt_mode *cp = data;
6936 bool changed, use_changed;
6937 int err;
6938
6939 bt_dev_dbg(hdev, "sock %p", sk);
6940
6941 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6942 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6943 MGMT_STATUS_INVALID_PARAMS);
6944
6945 hci_dev_lock(hdev);
6946
6947 if (cp->val)
6948 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6949 else
6950 changed = hci_dev_test_and_clear_flag(hdev,
6951 HCI_KEEP_DEBUG_KEYS);
6952
6953 if (cp->val == 0x02)
6954 use_changed = !hci_dev_test_and_set_flag(hdev,
6955 HCI_USE_DEBUG_KEYS);
6956 else
6957 use_changed = hci_dev_test_and_clear_flag(hdev,
6958 HCI_USE_DEBUG_KEYS);
6959
6960 if (hdev_is_powered(hdev) && use_changed &&
6961 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6962 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6963 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6964 sizeof(mode), &mode);
6965 }
6966
6967 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6968 if (err < 0)
6969 goto unlock;
6970
6971 if (changed)
6972 err = new_settings(hdev, sk);
6973
6974 unlock:
6975 hci_dev_unlock(hdev);
6976 return err;
6977 }
6978
6979 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6980 u16 len)
6981 {
6982 struct mgmt_cp_set_privacy *cp = cp_data;
6983 bool changed;
6984 int err;
6985
6986 bt_dev_dbg(hdev, "sock %p", sk);
6987
6988 if (!lmp_le_capable(hdev))
6989 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6990 MGMT_STATUS_NOT_SUPPORTED);
6991
6992 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6993 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6994 MGMT_STATUS_INVALID_PARAMS);
6995
6996 if (hdev_is_powered(hdev))
6997 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6998 MGMT_STATUS_REJECTED);
6999
7000 hci_dev_lock(hdev);
7001
7002 /* If user space supports this command it is also expected to
7003 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
7004 */
7005 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7006
7007 if (cp->privacy) {
7008 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
7009 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
7010 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
7011 hci_adv_instances_set_rpa_expired(hdev, true);
7012 if (cp->privacy == 0x02)
7013 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
7014 else
7015 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7016 } else {
7017 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7018 memset(hdev->irk, 0, sizeof(hdev->irk));
7019 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7020 hci_adv_instances_set_rpa_expired(hdev, false);
7021 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7022 }
7023
7024 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7025 if (err < 0)
7026 goto unlock;
7027
7028 if (changed)
7029 err = new_settings(hdev, sk);
7030
7031 unlock:
7032 hci_dev_unlock(hdev);
7033 return err;
7034 }
7035
7036 static bool irk_is_valid(struct mgmt_irk_info *irk)
7037 {
7038 switch (irk->addr.type) {
7039 case BDADDR_LE_PUBLIC:
7040 return true;
7041
7042 case BDADDR_LE_RANDOM:
7043 /* Two most significant bits shall be set */
7044 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7045 return false;
7046 return true;
7047 }
7048
7049 return false;
7050 }
7051
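/* Handler for MGMT_OP_LOAD_IRKS. The command carries a variable-length
 * array of IRK entries, so the length is validated with struct_size():
 * assuming the packed mgmt_irk_info layout of 23 bytes (7 bytes of
 * address info plus a 16 byte key value), a load of N keys must be
 * exactly sizeof(*cp) + N * 23 bytes. Blocked keys are skipped rather
 * than rejected so one bad entry does not fail the whole load.
 */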
7052 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7053 u16 len)
7054 {
7055 struct mgmt_cp_load_irks *cp = cp_data;
7056 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7057 sizeof(struct mgmt_irk_info));
7058 u16 irk_count, expected_len;
7059 int i, err;
7060
7061 bt_dev_dbg(hdev, "sock %p", sk);
7062
7063 if (!lmp_le_capable(hdev))
7064 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7065 MGMT_STATUS_NOT_SUPPORTED);
7066
7067 irk_count = __le16_to_cpu(cp->irk_count);
7068 if (irk_count > max_irk_count) {
7069 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7070 irk_count);
7071 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7072 MGMT_STATUS_INVALID_PARAMS);
7073 }
7074
7075 expected_len = struct_size(cp, irks, irk_count);
7076 if (expected_len != len) {
7077 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7078 expected_len, len);
7079 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7080 MGMT_STATUS_INVALID_PARAMS);
7081 }
7082
7083 bt_dev_dbg(hdev, "irk_count %u", irk_count);
7084
7085 for (i = 0; i < irk_count; i++) {
7086 struct mgmt_irk_info *key = &cp->irks[i];
7087
7088 if (!irk_is_valid(key))
7089 return mgmt_cmd_status(sk, hdev->id,
7090 MGMT_OP_LOAD_IRKS,
7091 MGMT_STATUS_INVALID_PARAMS);
7092 }
7093
7094 hci_dev_lock(hdev);
7095
7096 hci_smp_irks_clear(hdev);
7097
7098 for (i = 0; i < irk_count; i++) {
7099 struct mgmt_irk_info *irk = &cp->irks[i];
7100
7101 if (hci_is_blocked_key(hdev,
7102 HCI_BLOCKED_KEY_TYPE_IRK,
7103 irk->val)) {
7104 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7105 &irk->addr.bdaddr);
7106 continue;
7107 }
7108
7109 hci_add_irk(hdev, &irk->addr.bdaddr,
7110 le_addr_type(irk->addr.type), irk->val,
7111 BDADDR_ANY);
7112 }
7113
7114 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7115
7116 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7117
7118 hci_dev_unlock(hdev);
7119
7120 return err;
7121 }
7122
7123 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7124 {
7125 if (key->initiator != 0x00 && key->initiator != 0x01)
7126 return false;
7127
7128 switch (key->addr.type) {
7129 case BDADDR_LE_PUBLIC:
7130 return true;
7131
7132 case BDADDR_LE_RANDOM:
7133 /* Two most significant bits shall be set */
7134 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7135 return false;
7136 return true;
7137 }
7138
7139 return false;
7140 }
7141
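/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS. As with load_irks() the
 * length is validated with struct_size() over the variable key array.
 * Each entry is then checked individually: blocked or malformed keys are
 * skipped with a warning instead of failing the whole command, and P-256
 * debug keys are deliberately dropped via the fallthrough into the
 * default case.
 */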
7142 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7143 void *cp_data, u16 len)
7144 {
7145 struct mgmt_cp_load_long_term_keys *cp = cp_data;
7146 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7147 sizeof(struct mgmt_ltk_info));
7148 u16 key_count, expected_len;
7149 int i, err;
7150
7151 bt_dev_dbg(hdev, "sock %p", sk);
7152
7153 if (!lmp_le_capable(hdev))
7154 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7155 MGMT_STATUS_NOT_SUPPORTED);
7156
7157 key_count = __le16_to_cpu(cp->key_count);
7158 if (key_count > max_key_count) {
7159 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7160 key_count);
7161 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7162 MGMT_STATUS_INVALID_PARAMS);
7163 }
7164
7165 expected_len = struct_size(cp, keys, key_count);
7166 if (expected_len != len) {
7167 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7168 expected_len, len);
7169 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7170 MGMT_STATUS_INVALID_PARAMS);
7171 }
7172
7173 bt_dev_dbg(hdev, "key_count %u", key_count);
7174
7175 hci_dev_lock(hdev);
7176
7177 hci_smp_ltks_clear(hdev);
7178
7179 for (i = 0; i < key_count; i++) {
7180 struct mgmt_ltk_info *key = &cp->keys[i];
7181 u8 type, authenticated;
7182
7183 if (hci_is_blocked_key(hdev,
7184 HCI_BLOCKED_KEY_TYPE_LTK,
7185 key->val)) {
7186 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7187 &key->addr.bdaddr);
7188 continue;
7189 }
7190
7191 if (!ltk_is_valid(key)) {
7192 bt_dev_warn(hdev, "Invalid LTK for %pMR",
7193 &key->addr.bdaddr);
7194 continue;
7195 }
7196
7197 switch (key->type) {
7198 case MGMT_LTK_UNAUTHENTICATED:
7199 authenticated = 0x00;
7200 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7201 break;
7202 case MGMT_LTK_AUTHENTICATED:
7203 authenticated = 0x01;
7204 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7205 break;
7206 case MGMT_LTK_P256_UNAUTH:
7207 authenticated = 0x00;
7208 type = SMP_LTK_P256;
7209 break;
7210 case MGMT_LTK_P256_AUTH:
7211 authenticated = 0x01;
7212 type = SMP_LTK_P256;
7213 break;
7214 case MGMT_LTK_P256_DEBUG:
7215 authenticated = 0x00;
7216 type = SMP_LTK_P256_DEBUG;
7217 fallthrough;
7218 default:
7219 continue;
7220 }
7221
7222 hci_add_ltk(hdev, &key->addr.bdaddr,
7223 le_addr_type(key->addr.type), type, authenticated,
7224 key->val, key->enc_size, key->ediv, key->rand);
7225 }
7226
7227 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7228 NULL, 0);
7229
7230 hci_dev_unlock(hdev);
7231
7232 return err;
7233 }
7234
7235 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7236 {
7237 struct mgmt_pending_cmd *cmd = data;
7238 struct hci_conn *conn = cmd->user_data;
7239 struct mgmt_cp_get_conn_info *cp = cmd->param;
7240 struct mgmt_rp_get_conn_info rp;
7241 u8 status;
7242
7243 bt_dev_dbg(hdev, "err %d", err);
7244
7245 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7246
7247 status = mgmt_status(err);
7248 if (status == MGMT_STATUS_SUCCESS) {
7249 rp.rssi = conn->rssi;
7250 rp.tx_power = conn->tx_power;
7251 rp.max_tx_power = conn->max_tx_power;
7252 } else {
7253 rp.rssi = HCI_RSSI_INVALID;
7254 rp.tx_power = HCI_TX_POWER_INVALID;
7255 rp.max_tx_power = HCI_TX_POWER_INVALID;
7256 }
7257
7258 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7259 &rp, sizeof(rp));
7260
7261 mgmt_pending_free(cmd);
7262 }
7263
7264 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7265 {
7266 struct mgmt_pending_cmd *cmd = data;
7267 struct mgmt_cp_get_conn_info *cp = cmd->param;
7268 struct hci_conn *conn;
7269 int err;
7270 __le16 handle;
7271
7272 /* Make sure we are still connected */
7273 if (cp->addr.type == BDADDR_BREDR)
7274 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7275 &cp->addr.bdaddr);
7276 else
7277 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7278
7279 if (!conn || conn->state != BT_CONNECTED)
7280 return MGMT_STATUS_NOT_CONNECTED;
7281
7282 cmd->user_data = conn;
7283 handle = cpu_to_le16(conn->handle);
7284
7285 /* Refresh RSSI each time */
7286 err = hci_read_rssi_sync(hdev, handle);
7287
7288 /* For LE links the TX power does not change, thus we don't need to
7289 * query for it once the value is known.
7290 */
7291 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7292 conn->tx_power == HCI_TX_POWER_INVALID))
7293 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7294
7295 /* Max TX power needs to be read only once per connection */
7296 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7297 err = hci_read_tx_power_sync(hdev, handle, 0x01);
7298
7299 return err;
7300 }
7301
7302 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7303 u16 len)
7304 {
7305 struct mgmt_cp_get_conn_info *cp = data;
7306 struct mgmt_rp_get_conn_info rp;
7307 struct hci_conn *conn;
7308 unsigned long conn_info_age;
7309 int err = 0;
7310
7311 bt_dev_dbg(hdev, "sock %p", sk);
7312
7313 memset(&rp, 0, sizeof(rp));
7314 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7315 rp.addr.type = cp->addr.type;
7316
7317 if (!bdaddr_type_is_valid(cp->addr.type))
7318 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7319 MGMT_STATUS_INVALID_PARAMS,
7320 &rp, sizeof(rp));
7321
7322 hci_dev_lock(hdev);
7323
7324 if (!hdev_is_powered(hdev)) {
7325 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7326 MGMT_STATUS_NOT_POWERED, &rp,
7327 sizeof(rp));
7328 goto unlock;
7329 }
7330
7331 if (cp->addr.type == BDADDR_BREDR)
7332 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7333 &cp->addr.bdaddr);
7334 else
7335 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7336
7337 if (!conn || conn->state != BT_CONNECTED) {
7338 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7339 MGMT_STATUS_NOT_CONNECTED, &rp,
7340 sizeof(rp));
7341 goto unlock;
7342 }
7343
7344 /* To avoid the client trying to guess when to poll again, we calculate
7345 * the conn info age as a random value between the min/max set in hdev.
7346 */
7347 conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
7348 hdev->conn_info_max_age - 1);
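/* For example, with hypothetical limits of min 1000 and max 3000 ms
 * this picks a refresh point uniformly in [1000, 2999] ms, so repeated
 * queries from user space cannot lock onto a fixed refresh schedule.
 */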
7349
7350 /* Query controller to refresh cached values if they are too old or were
7351 * never read.
7352 */
7353 if (time_after(jiffies, conn->conn_info_timestamp +
7354 msecs_to_jiffies(conn_info_age)) ||
7355 !conn->conn_info_timestamp) {
7356 struct mgmt_pending_cmd *cmd;
7357
7358 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7359 len);
7360 if (!cmd) {
7361 err = -ENOMEM;
7362 } else {
7363 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7364 cmd, get_conn_info_complete);
7365 }
7366
7367 if (err < 0) {
7368 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7369 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7370
7371 if (cmd)
7372 mgmt_pending_free(cmd);
7373
7374 goto unlock;
7375 }
7376
7377 conn->conn_info_timestamp = jiffies;
7378 } else {
7379 /* Cache is valid, just reply with values cached in hci_conn */
7380 rp.rssi = conn->rssi;
7381 rp.tx_power = conn->tx_power;
7382 rp.max_tx_power = conn->max_tx_power;
7383
7384 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7385 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7386 }
7387
7388 unlock:
7389 hci_dev_unlock(hdev);
7390 return err;
7391 }
7392
7393 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7394 {
7395 struct mgmt_pending_cmd *cmd = data;
7396 struct mgmt_cp_get_clock_info *cp = cmd->param;
7397 struct mgmt_rp_get_clock_info rp;
7398 struct hci_conn *conn = cmd->user_data;
7399 u8 status = mgmt_status(err);
7400
7401 bt_dev_dbg(hdev, "err %d", err);
7402
7403 memset(&rp, 0, sizeof(rp));
7404 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7405 rp.addr.type = cp->addr.type;
7406
7407 if (err)
7408 goto complete;
7409
7410 rp.local_clock = cpu_to_le32(hdev->clock);
7411
7412 if (conn) {
7413 rp.piconet_clock = cpu_to_le32(conn->clock);
7414 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7415 }
7416
7417 complete:
7418 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7419 sizeof(rp));
7420
7421 mgmt_pending_free(cmd);
7422 }
7423
7424 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7425 {
7426 struct mgmt_pending_cmd *cmd = data;
7427 struct mgmt_cp_get_clock_info *cp = cmd->param;
7428 struct hci_cp_read_clock hci_cp;
7429 struct hci_conn *conn;
7430
7431 memset(&hci_cp, 0, sizeof(hci_cp));
7432 hci_read_clock_sync(hdev, &hci_cp);
7433
7434 /* Make sure connection still exists */
7435 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7436 if (!conn || conn->state != BT_CONNECTED)
7437 return MGMT_STATUS_NOT_CONNECTED;
7438
7439 cmd->user_data = conn;
7440 hci_cp.handle = cpu_to_le16(conn->handle);
7441 hci_cp.which = 0x01; /* Piconet clock */
7442
7443 return hci_read_clock_sync(hdev, &hci_cp);
7444 }
7445
7446 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7447 u16 len)
7448 {
7449 struct mgmt_cp_get_clock_info *cp = data;
7450 struct mgmt_rp_get_clock_info rp;
7451 struct mgmt_pending_cmd *cmd;
7452 struct hci_conn *conn;
7453 int err;
7454
7455 bt_dev_dbg(hdev, "sock %p", sk);
7456
7457 memset(&rp, 0, sizeof(rp));
7458 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7459 rp.addr.type = cp->addr.type;
7460
7461 if (cp->addr.type != BDADDR_BREDR)
7462 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7463 MGMT_STATUS_INVALID_PARAMS,
7464 &rp, sizeof(rp));
7465
7466 hci_dev_lock(hdev);
7467
7468 if (!hdev_is_powered(hdev)) {
7469 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7470 MGMT_STATUS_NOT_POWERED, &rp,
7471 sizeof(rp));
7472 goto unlock;
7473 }
7474
7475 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7476 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7477 &cp->addr.bdaddr);
7478 if (!conn || conn->state != BT_CONNECTED) {
7479 err = mgmt_cmd_complete(sk, hdev->id,
7480 MGMT_OP_GET_CLOCK_INFO,
7481 MGMT_STATUS_NOT_CONNECTED,
7482 &rp, sizeof(rp));
7483 goto unlock;
7484 }
7485 } else {
7486 conn = NULL;
7487 }
7488
7489 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7490 if (!cmd)
7491 err = -ENOMEM;
7492 else
7493 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7494 get_clock_info_complete);
7495
7496 if (err < 0) {
7497 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7498 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7499
7500 if (cmd)
7501 mgmt_pending_free(cmd);
7502 }
7503
7504
7505 unlock:
7506 hci_dev_unlock(hdev);
7507 return err;
7508 }
7509
7510 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7511 {
7512 struct hci_conn *conn;
7513
7514 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7515 if (!conn)
7516 return false;
7517
7518 if (conn->dst_type != type)
7519 return false;
7520
7521 if (conn->state != BT_CONNECTED)
7522 return false;
7523
7524 return true;
7525 }
7526
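/* Update the auto-connect policy of a connection parameter entry:
 * DISABLED/LINK_LOSS take the device off the pending lists (unless an
 * explicit connect is still in flight), REPORT queues it for advertising
 * reports, and DIRECT/ALWAYS queue it for reconnection whenever it is
 * not already connected.
 */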
7527 /* This function requires the caller holds hdev->lock */
7528 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7529 u8 addr_type, u8 auto_connect)
7530 {
7531 struct hci_conn_params *params;
7532
7533 params = hci_conn_params_add(hdev, addr, addr_type);
7534 if (!params)
7535 return -EIO;
7536
7537 if (params->auto_connect == auto_connect)
7538 return 0;
7539
7540 hci_pend_le_list_del_init(params);
7541
7542 switch (auto_connect) {
7543 case HCI_AUTO_CONN_DISABLED:
7544 case HCI_AUTO_CONN_LINK_LOSS:
7545 /* If auto connect is being disabled when we're trying to
7546 * connect to a device, keep connecting.
7547 */
7548 if (params->explicit_connect)
7549 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7550 break;
7551 case HCI_AUTO_CONN_REPORT:
7552 if (params->explicit_connect)
7553 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7554 else
7555 hci_pend_le_list_add(params, &hdev->pend_le_reports);
7556 break;
7557 case HCI_AUTO_CONN_DIRECT:
7558 case HCI_AUTO_CONN_ALWAYS:
7559 if (!is_connected(hdev, addr, addr_type))
7560 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7561 break;
7562 }
7563
7564 params->auto_connect = auto_connect;
7565
7566 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7567 addr, addr_type, auto_connect);
7568
7569 return 0;
7570 }
7571
device_added(struct sock * sk,struct hci_dev * hdev,bdaddr_t * bdaddr,u8 type,u8 action)7572 static void device_added(struct sock *sk, struct hci_dev *hdev,
7573 bdaddr_t *bdaddr, u8 type, u8 action)
7574 {
7575 struct mgmt_ev_device_added ev;
7576
7577 bacpy(&ev.addr.bdaddr, bdaddr);
7578 ev.addr.type = type;
7579 ev.action = action;
7580
7581 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7582 }
7583
add_device_sync(struct hci_dev * hdev,void * data)7584 static int add_device_sync(struct hci_dev *hdev, void *data)
7585 {
7586 return hci_update_passive_scan_sync(hdev);
7587 }
7588
add_device(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)7589 static int add_device(struct sock *sk, struct hci_dev *hdev,
7590 void *data, u16 len)
7591 {
7592 struct mgmt_cp_add_device *cp = data;
7593 u8 auto_conn, addr_type;
7594 struct hci_conn_params *params;
7595 int err;
7596 u32 current_flags = 0;
7597 u32 supported_flags;
7598
7599 bt_dev_dbg(hdev, "sock %p", sk);
7600
7601 if (!bdaddr_type_is_valid(cp->addr.type) ||
7602 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7603 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7604 MGMT_STATUS_INVALID_PARAMS,
7605 &cp->addr, sizeof(cp->addr));
7606
7607 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7608 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7609 MGMT_STATUS_INVALID_PARAMS,
7610 &cp->addr, sizeof(cp->addr));
7611
7612 hci_dev_lock(hdev);
7613
7614 if (cp->addr.type == BDADDR_BREDR) {
7615 /* Only incoming connections action is supported for now */
7616 if (cp->action != 0x01) {
7617 err = mgmt_cmd_complete(sk, hdev->id,
7618 MGMT_OP_ADD_DEVICE,
7619 MGMT_STATUS_INVALID_PARAMS,
7620 &cp->addr, sizeof(cp->addr));
7621 goto unlock;
7622 }
7623
7624 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7625 &cp->addr.bdaddr,
7626 cp->addr.type, 0);
7627 if (err)
7628 goto unlock;
7629
7630 hci_update_scan(hdev);
7631
7632 goto added;
7633 }
7634
7635 addr_type = le_addr_type(cp->addr.type);
7636
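	/* Map the mgmt Add Device action to the LE auto-connect policy:
	 * 0x02 (auto-connect) -> ALWAYS, 0x01 (allow incoming) -> DIRECT,
	 * 0x00 (background scan) -> REPORT.
	 */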
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* The kernel internally uses conn_params with resolvable private
	 * addresses, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}

static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* The kernel internally uses conn_params with resolvable
		 * private addresses, but Remove Device allows only identity
		 * addresses. Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_free(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

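		/* Only address type 0x00 is accepted when BDADDR_ANY is
		 * used to remove all devices at once.
		 */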
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			hci_conn_params_free(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int conn_update_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn_params *params = data;
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, &params->addr, params->addr_type);
	if (!conn)
		return -ECANCELED;

	return hci_le_conn_update_sync(hdev, conn, params);
}

static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	if (param_count > 1)
		hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		bool update = false;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		/* Detect when the loading is for an existing parameter, then
		 * attempt to trigger the connection update procedure.
		 */
		if (!i && param_count == 1) {
			hci_param = hci_conn_params_lookup(hdev,
							   &param->addr.bdaddr,
							   addr_type);
			if (hci_param)
				update = true;
			else
				hci_conn_params_clear_disabled(hdev);
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;

		/* Check if we need to trigger a connection update */
		if (update) {
			struct hci_conn *conn;

			/* Look up an existing connection as central and, if
			 * the parameters don't match, trigger a connection
			 * update.
			 */
			conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
						       addr_type);
			if (conn && conn->role == HCI_ROLE_MASTER &&
			    (conn->le_conn_min_interval != min ||
			     conn->le_conn_max_interval != max ||
			     conn->le_conn_latency != latency ||
			     conn->le_supv_timeout != timeout))
				hci_cmd_sync_queue(hdev, conn_update_sync,
						   hci_param, NULL);
		}
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}

static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

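			/* 5 bytes for the Class of Device field (len +
			 * type + 3-byte CoD) plus 18 bytes (len + type +
			 * 16-byte value) each for the C-192 hash and
			 * R-192 randomizer.
			 */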
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}

static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
				  struct mgmt_cp_read_local_oob_ext_data *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
			       cp, sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
				 read_local_oob_ext_data_complete);

	if (err < 0) {
		mgmt_pending_remove(cmd);
		return err;
	}

	return 0;
}

static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
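				/* 9-byte LE bdaddr field, 3-byte role,
				 * two 18-byte SC confirm/random values
				 * and a 3-byte flags field
				 */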
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

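		/* The 7th byte of the LE bdaddr field encodes the address
		 * type: 0x00 for a public address, 0x01 for a random one.
		 */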
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}

static u32 get_supported_adv_flags(struct hci_dev *hdev)
{
	u32 flags = 0;

	flags |= MGMT_ADV_FLAG_CONNECTABLE;
	flags |= MGMT_ADV_FLAG_DISCOV;
	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
	flags |= MGMT_ADV_FLAG_APPEARANCE;
	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
	flags |= MGMT_ADV_PARAM_DURATION;
	flags |= MGMT_ADV_PARAM_TIMEOUT;
	flags |= MGMT_ADV_PARAM_INTERVALS;
	flags |= MGMT_ADV_PARAM_TX_POWER;
	flags |= MGMT_ADV_PARAM_SCAN_RSP;

	/* With extended advertising, the TX_POWER returned from
	 * Set Adv Param will always be valid.
	 */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
		flags |= MGMT_ADV_FLAG_TX_POWER;

	if (ext_adv_capable(hdev)) {
		flags |= MGMT_ADV_FLAG_SEC_1M;
		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;

		if (le_2m_capable(hdev))
			flags |= MGMT_ADV_FLAG_SEC_2M;

		if (le_coded_capable(hdev))
			flags |= MGMT_ADV_FLAG_SEC_CODED;
	}

	return flags;
}

static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

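	/* The reply carries one extra byte per advertising instance
	 * after the fixed-size part.
	 */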
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = max_adv_len(hdev);
	rp->max_scan_rsp_len = max_adv_len(hdev);
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}

static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */

	return eir_append_local_name(hdev, buf, 0);
}

static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
			   bool is_adv_data)
{
	u8 max_len = max_adv_len(hdev);

	if (is_adv_data) {
		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
				 MGMT_ADV_FLAG_LIMITED_DISCOV |
				 MGMT_ADV_FLAG_MANAGED_FLAGS))
			max_len -= 3;

		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
			max_len -= 3;
	} else {
		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
			max_len -= calculate_name_len(hdev);

		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
			max_len -= 4;
	}

	return max_len;
}

static bool flags_managed(u32 adv_flags)
{
	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
			    MGMT_ADV_FLAG_LIMITED_DISCOV |
			    MGMT_ADV_FLAG_MANAGED_FLAGS);
}

static bool tx_power_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
}

static bool name_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
}

static bool appearance_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
}

static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
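	/* Each advertising structure is a length octet followed by a
	 * type octet and payload, so step by cur_len + 1 per field.
	 */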
	for (i = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		if (!cur_len)
			continue;

		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;

		/* If the current field length would exceed the total data
		 * length, then it's invalid.
		 */
		if (i + cur_len >= len)
			return false;
	}

	return true;
}

static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
{
	u32 supported_flags, phy_flags;

	/* The current implementation only supports a subset of the specified
	 * flags. Also need to check mutual exclusiveness of sec flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
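	/* phy_flags & -phy_flags isolates the lowest set bit, so the XOR
	 * below is non-zero whenever more than one secondary PHY flag is
	 * set.
	 */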
	if (adv_flags & ~supported_flags ||
	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
		return false;

	return true;
}

static bool adv_busy(struct hci_dev *hdev)
{
	return pending_find(MGMT_OP_SET_LE, hdev);
}

static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
			     int err)
{
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance;

		if (!adv->pending)
			continue;

		if (!err) {
			adv->pending = false;
			continue;
		}

		instance = adv->instance;

		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(sk, hdev, instance);
	}

	hci_dev_unlock(hdev);
}

static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_advertising *cp = cmd->param;
	struct mgmt_rp_add_advertising rp;

	memset(&rp, 0, sizeof(rp));

	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));

	add_adv_complete(hdev, cmd->sk, cp->instance, err);

	mgmt_pending_free(cmd);
}

static int add_advertising_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_advertising *cp = cmd->param;

	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}

static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure.
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}

static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;

	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
}

static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined.
	 * Any extra parameters we don't know about will be ignored in this
	 * request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In the new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
	struct mgmt_rp_add_advertising rp;

	add_adv_complete(hdev, cmd->sk, cp->instance, err);

	memset(&rp, 0, sizeof(rp));

	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}

static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
	int err;

	if (ext_adv_capable(hdev)) {
		err = hci_update_adv_data_sync(hdev, cp->instance);
		if (err)
			return err;

		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
		if (err)
			return err;

		return hci_enable_ext_advertising_sync(hdev, cp->instance);
	}

	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}

static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In the new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating the data, so trigger the
	 * advertising_added event if this is an instance that wasn't
	 * previously advertising. If a failure occurs in the requests we
	 * initiated, we will remove the instance again in
	 * add_advertising_complete.
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void remove_advertising_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_advertising *cp = cmd->param;
	struct mgmt_rp_remove_advertising rp;

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}

static int remove_advertising_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_advertising *cp = cmd->param;
	int err;

	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
	if (err)
		return err;

	if (list_empty(&hdev->adv_instances))
		err = hci_disable_advertising_sync(hdev);

	return err;
}

static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 flags, supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	/* The current implementation only supports a subset of the specified
	 * flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	if (flags & ~supported_flags)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}

9242 static const struct hci_mgmt_handler mgmt_handlers[] = {
9243 { NULL }, /* 0x0000 (no command) */
9244 { read_version, MGMT_READ_VERSION_SIZE,
9245 HCI_MGMT_NO_HDEV |
9246 HCI_MGMT_UNTRUSTED },
9247 { read_commands, MGMT_READ_COMMANDS_SIZE,
9248 HCI_MGMT_NO_HDEV |
9249 HCI_MGMT_UNTRUSTED },
9250 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
9251 HCI_MGMT_NO_HDEV |
9252 HCI_MGMT_UNTRUSTED },
9253 { read_controller_info, MGMT_READ_INFO_SIZE,
9254 HCI_MGMT_UNTRUSTED },
9255 { set_powered, MGMT_SETTING_SIZE },
9256 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
9257 { set_connectable, MGMT_SETTING_SIZE },
9258 { set_fast_connectable, MGMT_SETTING_SIZE },
9259 { set_bondable, MGMT_SETTING_SIZE },
9260 { set_link_security, MGMT_SETTING_SIZE },
9261 { set_ssp, MGMT_SETTING_SIZE },
9262 { set_hs, MGMT_SETTING_SIZE },
9263 { set_le, MGMT_SETTING_SIZE },
9264 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
9265 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
9266 { add_uuid, MGMT_ADD_UUID_SIZE },
9267 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
9268 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
9269 HCI_MGMT_VAR_LEN },
9270 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
9271 HCI_MGMT_VAR_LEN },
9272 { disconnect, MGMT_DISCONNECT_SIZE },
9273 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
9274 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
9275 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
9276 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
9277 { pair_device, MGMT_PAIR_DEVICE_SIZE },
9278 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
9279 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
9280 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
9281 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
9282 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
9283 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
9284 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,         MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,      MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info, MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,          MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,        MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,     MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor, MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
};
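
/* Each mgmt_handlers entry above pairs a command handler with the
 * expected parameter size of its command. As a rough summary (the
 * authoritative flag definitions live in hci_core.h):
 * HCI_MGMT_VAR_LEN treats the size as a minimum for variable-length
 * payloads, HCI_MGMT_NO_HDEV marks commands that target no specific
 * controller, HCI_MGMT_UNTRUSTED allows invocation from untrusted
 * sockets, HCI_MGMT_UNCONFIGURED permits use on not-yet-configured
 * controllers, and HCI_MGMT_HDEV_OPTIONAL accepts either a
 * controller index or none.
 */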

void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}

void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
}

void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}

void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	/* If the power off is because of hdev unregistration let's use
	 * the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
	else
		match.mgmt_status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct mgmt_pending_cmd *cmd;
	u8 status;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_RESPONDER:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}

	return MGMT_LTK_UNAUTHENTICATED;
}

void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));
	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't need their
	 * long term keys stored, since their addresses will change on
	 * the next connection.
	 *
	 * Only store the long term key when the remote device provides
	 * an identity address. If the remote identity is known, the
	 * long term keys are internally mapped to the identity address,
	 * so static random and public addresses are allowed here.
	 */
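	/* A static random address is identified by its two most
	 * significant bits being set (hence the 0xc0 mask on the top
	 * byte below); any other ADDR_LE_DEV_RANDOM address is
	 * resolvable or non-resolvable and not worth storing.
	 */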
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

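	/* SMP_LTK is the key distributed by the initiator of the
	 * pairing; the initiator flag (historically named "master" in
	 * the mgmt API) lets userspace tell which side it came from.
	 */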
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));
	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't need their
	 * signature resolving keys stored, since their addresses will
	 * change on the next connection.
	 *
	 * Only store the signature resolving key when the remote device
	 * provides an identity address, so static random and public
	 * addresses are allowed here.
	 */
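	/* As for long term keys above, only static random addresses
	 * (two most significant bits set) or public addresses are
	 * stable enough to key on.
	 */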
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* Allocate a buffer sized for either the LE advertising data or
	 * the BR/EDR name and class of device.
	 */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class, sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}

static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}

bool mgmt_powering_down(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
		return true;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return false;

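	/* A pending Set Powered command with val set to 0 means a
	 * power off is already in flight.
	 */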
	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	if (!mgmt_connected)
		return;

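	/* Only BR/EDR (ACL) and LE links are exposed over mgmt */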
	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

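	/* Unpair Device waits for the baseband link to drop, so a
	 * failed disconnect must also complete any pending unpair
	 * commands.
	 */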
	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
					 conn->dst_type, status, true);
		return;
	}

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS,
				   NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this name change is a side effect of an HCI command
		 * issued while powering the controller on or off, don't
		 * send any mgmt signals.
		 */
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
			return;

		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}

static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}

static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
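			/* Promote each 16-bit UUID to 128 bits by
			 * splicing it into bytes 12-13 of the little
			 * endian Bluetooth Base UUID before comparing.
			 */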
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}

static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If an RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results
	 * with an RSSI smaller than the threshold will be dropped. If
	 * the quirk is set, let it through for further processing, as we
	 * might need to restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure the results carry up-to-date RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}

void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.monitor_handle = cpu_to_le16(handle);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}

static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}

static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitors
	 *
	 * For cases 1 and 2, report all advertisements via
	 * MGMT_EV_DEVICE_FOUND and report ONLY one advertisement per device
	 * for the matched Monitor via MGMT_EV_ADV_MONITOR_DEVICE_FOUND.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and
		 * that this is either a subsequent advertisement report
		 * for an already matched Advertisement Monitor, or that
		 * controller offloading support is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}

static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}

void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE, one exception is if we have pend_le_reports > 0, in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
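			/* Limited Discoverable Mode is bit 13 of the
			 * Class of Device, i.e. bit 5 of the middle
			 * byte.
			 */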
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);

			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}

void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}

void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	} else {
		memset(&ev.addr, 0, sizeof(ev.addr));
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}

static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};

int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}

void mgmt_cleanup(struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

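	/* Walk every controller and complete any mesh transmissions
	 * still owned by the closing socket.
	 */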
	list_for_each_entry(hdev, &hci_dev_list, list) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, true);
		} while (mesh_tx);
	}

	read_unlock(&hci_dev_list_lock);
}