xref: /linux/net/bluetooth/mgmt.c (revision 17f89341cb4281d1da0e2fb0de5406ab7c4e25ef)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <linux/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "smp.h"
37 #include "mgmt_util.h"
38 #include "mgmt_config.h"
39 #include "msft.h"
40 #include "eir.h"
41 #include "aosp.h"
42 
43 #define MGMT_VERSION	1
44 #define MGMT_REVISION	23
45 
/* Full list of management command opcodes, reported by Read Commands to
 * trusted sockets (see read_commands() below).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
	MGMT_OP_HCI_CMD_SYNC,
};
137 
/* Full list of management event opcodes, reported by Read Commands to
 * trusted sockets (see read_commands() below).
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
184 
/* Read-only command subset reported to untrusted sockets (those without
 * HCI_SOCK_TRUSTED set) by read_commands().
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
197 
/* Event subset reported to untrusted sockets by read_commands(). */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
212 
213 #define CACHE_TIMEOUT	secs_to_jiffies(2)
214 
215 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
216 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
217 
218 /* HCI to MGMT error code conversion table */
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code (see mgmt_status()); the
 * comment on each entry names the HCI error it translates.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
285 
286 static u8 mgmt_errno_status(int err)
287 {
288 	switch (err) {
289 	case 0:
290 		return MGMT_STATUS_SUCCESS;
291 	case -EPERM:
292 		return MGMT_STATUS_REJECTED;
293 	case -EINVAL:
294 		return MGMT_STATUS_INVALID_PARAMS;
295 	case -EOPNOTSUPP:
296 		return MGMT_STATUS_NOT_SUPPORTED;
297 	case -EBUSY:
298 		return MGMT_STATUS_BUSY;
299 	case -ETIMEDOUT:
300 		return MGMT_STATUS_AUTH_FAILED;
301 	case -ENOMEM:
302 		return MGMT_STATUS_NO_RESOURCES;
303 	case -EISCONN:
304 		return MGMT_STATUS_ALREADY_CONNECTED;
305 	case -ENOTCONN:
306 		return MGMT_STATUS_DISCONNECTED;
307 	}
308 
309 	return MGMT_STATUS_FAILED;
310 }
311 
312 static u8 mgmt_status(int err)
313 {
314 	if (err < 0)
315 		return mgmt_errno_status(err);
316 
317 	if (err < ARRAY_SIZE(mgmt_status_table))
318 		return mgmt_status_table[err];
319 
320 	return MGMT_STATUS_FAILED;
321 }
322 
/* Send an index-related event on the control channel to all sockets
 * matching @flag; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
329 
/* Send an event on the control channel only to sockets matching @flag,
 * optionally skipping @skip_sk (typically the command originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
336 
/* Send an event on the control channel to trusted sockets only,
 * optionally skipping @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
343 
/* Like mgmt_event() but for a pre-built event skb; the skb is consumed
 * by mgmt_send_event_skb().
 */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
349 
350 static u8 le_addr_type(u8 mgmt_addr_type)
351 {
352 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
353 		return ADDR_LE_DEV_PUBLIC;
354 	else
355 		return ADDR_LE_DEV_RANDOM;
356 }
357 
/* Fill a mgmt_rp_read_version structure with the interface version and
 * revision implemented by this file.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
365 
/* Handle the Read Management Version Information command. Always
 * succeeds and targets no specific controller (MGMT_INDEX_NONE).
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
378 
/* Handle the Read Management Supported Commands command. Trusted
 * sockets get the full command/event lists, untrusted ones only the
 * read-only subsets.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	/* Fixed header followed by one __le16 per command and event */
	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
430 
/* Handle the Read Controller Index List command: report the indexes of
 * all configured controllers.
 *
 * The device list is walked twice under hci_dev_list_lock: once to size
 * the reply, once to fill it. The allocation uses GFP_ATOMIC because
 * the (reader) lock is held across it.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip controllers still being set up or claimed by a
		 * user channel; they are not listed.
		 */
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the length: the second pass may have skipped some
	 * devices the first pass counted.
	 */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
488 
/* Handle the Read Unconfigured Controller Index List command: the
 * mirror of read_index_list() that reports only controllers with the
 * HCI_UNCONFIGURED flag set. Same two-pass/locking scheme.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Second pass may skip devices the first pass counted */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
546 
/* Handle the Read Extended Controller Index List command: report all
 * controllers (configured and unconfigured) with a type and bus per
 * entry. Also switches the socket over to extended index events.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	/* GFP_ATOMIC because hci_dev_list_lock is held */
	rp = kmalloc_flex(*rp, entry, count, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		/* Type 0x01 = unconfigured, 0x00 = configured */
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
612 
613 static bool is_configured(struct hci_dev *hdev)
614 {
615 	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) &&
616 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
617 		return false;
618 
619 	if ((hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) ||
620 	     hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY)) &&
621 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
622 		return false;
623 
624 	return true;
625 }
626 
/* Build the missing_options bitmask for Read Controller Configuration
 * Information: each bit marks a configuration step that is still
 * outstanding (mirrors the checks in is_configured()).
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) ||
	     hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
642 
/* Broadcast a New Configuration Options event with the current missing
 * options, skipping @skip (the socket that triggered the change).
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
650 
/* Complete @opcode successfully with the current missing options as
 * the reply payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
658 
/* Handle the Read Controller Configuration Information command: report
 * the manufacturer plus which configuration options are supported and
 * which are still missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Setting a public address is only supported when the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
686 
/* Build the bitmask of PHYs this controller supports, derived from its
 * BR/EDR LMP feature bits and LE feature bits.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot is mandatory for BR/EDR */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			/* 3M EDR requires 2M EDR support */
			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory for LE */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
738 
/* Build the bitmask of currently selected PHYs.
 *
 * For BR/EDR the selection is read from hdev->pkt_type; note that for
 * EDR the HCI_2DHx/HCI_3DHx bits are "shall NOT be used" bits, so a
 * clear bit means the packet type is selected.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	/* LE selection comes from the default TX/RX PHY preferences */
	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
801 
802 static u32 get_configurable_phys(struct hci_dev *hdev)
803 {
804 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
805 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
806 }
807 
808 static u32 get_supported_settings(struct hci_dev *hdev)
809 {
810 	u32 settings = 0;
811 
812 	settings |= MGMT_SETTING_POWERED;
813 	settings |= MGMT_SETTING_BONDABLE;
814 	settings |= MGMT_SETTING_DEBUG_KEYS;
815 	settings |= MGMT_SETTING_CONNECTABLE;
816 	settings |= MGMT_SETTING_DISCOVERABLE;
817 
818 	if (lmp_bredr_capable(hdev)) {
819 		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
820 			settings |= MGMT_SETTING_FAST_CONNECTABLE;
821 		settings |= MGMT_SETTING_BREDR;
822 		settings |= MGMT_SETTING_LINK_SECURITY;
823 
824 		if (lmp_ssp_capable(hdev)) {
825 			settings |= MGMT_SETTING_SSP;
826 		}
827 
828 		if (lmp_sc_capable(hdev))
829 			settings |= MGMT_SETTING_SECURE_CONN;
830 
831 		if (hci_test_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED))
832 			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
833 	}
834 
835 	if (lmp_le_capable(hdev)) {
836 		settings |= MGMT_SETTING_LE;
837 		settings |= MGMT_SETTING_SECURE_CONN;
838 		settings |= MGMT_SETTING_PRIVACY;
839 		settings |= MGMT_SETTING_STATIC_ADDRESS;
840 		settings |= MGMT_SETTING_ADVERTISING;
841 	}
842 
843 	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) || hdev->set_bdaddr)
844 		settings |= MGMT_SETTING_CONFIGURATION;
845 
846 	if (cis_central_capable(hdev))
847 		settings |= MGMT_SETTING_CIS_CENTRAL;
848 
849 	if (cis_peripheral_capable(hdev))
850 		settings |= MGMT_SETTING_CIS_PERIPHERAL;
851 
852 	if (bis_capable(hdev))
853 		settings |= MGMT_SETTING_ISO_BROADCASTER;
854 
855 	if (sync_recv_capable(hdev))
856 		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;
857 
858 	if (ll_privacy_capable(hdev))
859 		settings |= MGMT_SETTING_LL_PRIVACY;
860 
861 	if (past_sender_capable(hdev))
862 		settings |= MGMT_SETTING_PAST_SENDER;
863 
864 	if (past_receiver_capable(hdev))
865 		settings |= MGMT_SETTING_PAST_RECEIVER;
866 
867 	settings |= MGMT_SETTING_PHY_CONFIGURATION;
868 
869 	return settings;
870 }
871 
/* Build the bitmask of currently active settings from the hdev flags.
 * Used for the current_settings field of Read Controller Information
 * and for New Settings events.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_enabled(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_enabled(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_enabled(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_enabled(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	if (ll_privacy_enabled(hdev))
		settings |= MGMT_SETTING_LL_PRIVACY;

	if (past_sender_enabled(hdev))
		settings |= MGMT_SETTING_PAST_SENDER;

	if (past_receiver_enabled(hdev))
		settings |= MGMT_SETTING_PAST_RECEIVER;

	return settings;
}
960 
/* Look up a pending management command for @opcode on the control
 * channel; returns NULL if none is pending.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
965 
966 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
967 {
968 	struct mgmt_pending_cmd *cmd;
969 
970 	/* If there's a pending mgmt command the flags will not yet have
971 	 * their final values, so check for this first.
972 	 */
973 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
974 	if (cmd) {
975 		struct mgmt_mode *cp = cmd->param;
976 		if (cp->val == 0x01)
977 			return LE_AD_GENERAL;
978 		else if (cp->val == 0x02)
979 			return LE_AD_LIMITED;
980 	} else {
981 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
982 			return LE_AD_LIMITED;
983 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
984 			return LE_AD_GENERAL;
985 	}
986 
987 	return 0;
988 }
989 
990 bool mgmt_get_connectable(struct hci_dev *hdev)
991 {
992 	struct mgmt_pending_cmd *cmd;
993 
994 	/* If there's a pending mgmt command the flag will not yet have
995 	 * it's final value, so check for this first.
996 	 */
997 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
998 	if (cmd) {
999 		struct mgmt_mode *cp = cmd->param;
1000 
1001 		return cp->val;
1002 	}
1003 
1004 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
1005 }
1006 
/* hci_cmd_sync callback: push the cached EIR data and class of device
 * to the controller. Always reports success.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
1014 
/* Delayed-work handler for the service cache timeout: if the cache was
 * still active, clear the flag and queue the sync work that flushes
 * EIR and class of device to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	/* test-and-clear so only one invocation does the flush */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
1025 
1026 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1027 {
1028 	/* The generation of a new RPA and programming it into the
1029 	 * controller happens in the hci_req_enable_advertising()
1030 	 * function.
1031 	 */
1032 	if (ext_adv_capable(hdev))
1033 		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1034 	else
1035 		return hci_enable_advertising_sync(hdev);
1036 }
1037 
/* Delayed work handler for hdev->rpa_expired: the Resolvable Private
 * Address lifetime has elapsed. Mark the RPA expired and, when
 * advertising is enabled, queue a re-enable so a new RPA is used.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Only re-program the address immediately when advertising */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1052 
1053 static int set_discoverable_sync(struct hci_dev *hdev, void *data);
1054 
/* Delayed work handler for hdev->discov_off: the discoverable timeout
 * has expired. Clears the discoverable flags under hdev->lock, queues
 * the HCI update and notifies mgmt listeners of the new settings.
 */
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}
1079 
1080 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1081 
/* Finish a mesh TX entry: emit the Mesh Packet Complete event for its
 * handle (unless @silent) and remove/free the entry.
 */
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	/* Copy the handle before the entry is freed below */
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}
1093 
/* hci_cmd_sync callback: the mesh transmission window has ended. Clear
 * the sending flag, stop advertising when no instances remain and
 * complete the next queued mesh TX entry, if any.
 */
static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	if (list_empty(&hdev->adv_instances))
		hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}
1108 
1109 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1110 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
/* Completion callback for mesh_send_done_sync: kick off the next queued
 * mesh transmission, if one exists. On queueing failure the entry is
 * completed (and its event emitted) immediately.
 */
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	/* Reuse @err locally; the incoming value is not consulted */
	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}
1126 
/* Delayed work handler for hdev->mesh_send_done: queue the sync cleanup
 * of a finished mesh transmission, but only while one is in flight.
 */
static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}
1137 
/* One-time per-controller mgmt initialization, performed when the first
 * mgmt command touches @hdev. Sets up the delayed-work handlers and
 * marks the device as mgmt-controlled. Idempotent via the HCI_MGMT flag.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}
1159 
/* MGMT_OP_READ_INFO handler: fill a mgmt_rp_read_info reply with the
 * controller's address, version, settings, class and names, snapshotted
 * under hdev->lock, and complete the command on @sk.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1189 
1190 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1191 {
1192 	u16 eir_len = 0;
1193 	size_t name_len;
1194 
1195 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1196 		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1197 					  hdev->dev_class, 3);
1198 
1199 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1200 		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1201 					  hdev->appearance);
1202 
1203 	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
1204 	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1205 				  hdev->dev_name, name_len);
1206 
1207 	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
1208 	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1209 				  hdev->short_name, name_len);
1210 
1211 	return eir_len;
1212 }
1213 
/* MGMT_OP_READ_EXT_INFO handler: like read_controller_info() but with
 * the class/appearance/names encoded as EIR data appended to the reply.
 * Calling it also switches the socket to extended info events only.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* Stack buffer large enough for the fixed reply plus EIR data */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1253 
/* Broadcast an Extended Controller Information Changed event carrying
 * the current EIR-encoded info, to sockets that opted in via
 * HCI_MGMT_EXT_INFO_EVENTS, excluding @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1269 
/* Complete @opcode on @sk with the current settings bitmask as the
 * command's reply payload.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1277 
1278 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1279 {
1280 	struct mgmt_ev_advertising_added ev;
1281 
1282 	ev.instance = instance;
1283 
1284 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1285 }
1286 
1287 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1288 			      u8 instance)
1289 {
1290 	struct mgmt_ev_advertising_removed ev;
1291 
1292 	ev.instance = instance;
1293 
1294 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1295 }
1296 
1297 static void cancel_adv_timeout(struct hci_dev *hdev)
1298 {
1299 	if (hdev->adv_instance_timeout) {
1300 		hdev->adv_instance_timeout = 0;
1301 		cancel_delayed_work(&hdev->adv_instance_expire);
1302 	}
1303 }
1304 
/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	/* Re-sort every known LE connection parameter entry into the
	 * pending-connection or pending-report list according to its
	 * auto_connect policy.
	 */
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}
1329 
/* Broadcast the New Settings event with the current settings bitmask to
 * sockets subscribed to setting events, excluding @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1337 
/* Completion callback for set_powered_sync: respond to the original
 * Set Powered command and, on power on, restore LE actions and notify
 * listeners. The pending cmd is validated first since it may have been
 * cancelled while the sync operation ran.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_free(cmd);
}
1373 
/* hci_cmd_sync callback for Set Powered: snapshot the requested mode
 * under mgmt_pending_lock (the cmd may be freed concurrently once the
 * lock is dropped) and apply it.
 */
static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode cp;

	mutex_lock(&hdev->mgmt_pending_lock);

	/* Make sure cmd still outstanding. */
	if (!__mgmt_pending_listed(hdev, cmd)) {
		mutex_unlock(&hdev->mgmt_pending_lock);
		return -ECANCELED;
	}

	/* Copy the parameter so cmd need not be referenced after unlock */
	memcpy(&cp, cmd->param, sizeof(cp));

	mutex_unlock(&hdev->mgmt_pending_lock);

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp.val);
}
1395 
/* MGMT_OP_SET_POWERED handler: validate the request, reject it while a
 * power-off or another Set Powered is in progress, short-circuit when
 * the power state already matches, otherwise queue the sync operation.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only 0x00 (off) and 0x01 (on) are valid modes */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Refuse power off while a power down is already in flight */
	if (!cp->val) {
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
					      MGMT_STATUS_BUSY);
			goto failed;
		}
	}

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just return current settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1454 
/* Broadcast the New Settings event to all subscribed sockets. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1459 
/* Shared context for the pending-command iteration callbacks below. */
struct cmd_lookup {
	struct sock *sk;	/* first responder's socket; held for later skip/put */
	struct hci_dev *hdev;
	u8 mgmt_status;		/* status used when completing commands */
};
1465 
/* Per-cmd callback: answer @cmd with the current settings and remember
 * (and hold a reference to) the first responding socket in @data so the
 * caller can skip it when broadcasting the follow-up event.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}
1477 
/* Per-cmd callback: fail @cmd with the status pointed to by @data. */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, *status);
}
1484 
/* Per-cmd callback: complete @cmd using its own cmd_complete handler
 * when one is set, otherwise fall back to a plain status response with
 * the mgmt_status carried in the cmd_lookup context.
 */
static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	/* dequeue cmd_sync entries using cmd as data as that is about to be
	 * removed/freed.
	 */
	hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);

	if (cmd->cmd_complete) {
		cmd->cmd_complete(cmd, match->mgmt_status);
		return;
	}

	/* No dedicated handler: reuse the status-only responder. Note
	 * cmd_status_rsp() reads @data as a u8 pointer, which aliases
	 * the first byte of struct cmd_lookup only when the mgmt_status
	 * member is what the caller intends — here the callers pass a
	 * cmd_lookup, so the status-only path dereferences match->sk's
	 * first byte; kept as-is to preserve existing behavior.
	 */
	cmd_status_rsp(cmd, data);
}
1501 
/* Default cmd_complete: echo the original command parameters back. */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1507 
/* cmd_complete for address-based commands: reply with only the leading
 * mgmt_addr_info portion of the original parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1513 
1514 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1515 {
1516 	if (!lmp_bredr_capable(hdev))
1517 		return MGMT_STATUS_NOT_SUPPORTED;
1518 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1519 		return MGMT_STATUS_REJECTED;
1520 	else
1521 		return MGMT_STATUS_SUCCESS;
1522 }
1523 
1524 static u8 mgmt_le_support(struct hci_dev *hdev)
1525 {
1526 	if (!lmp_le_capable(hdev))
1527 		return MGMT_STATUS_NOT_SUPPORTED;
1528 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1529 		return MGMT_STATUS_REJECTED;
1530 	else
1531 		return MGMT_STATUS_SUCCESS;
1532 }
1533 
/* Completion callback for set_discoverable_sync: on success arm the
 * discoverable timeout (if one was requested) and notify listeners; on
 * failure report the error and roll back the limited-discoverable flag.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Arming of the timeout is deferred to here so it only starts
	 * once the mode change actually took effect.
	 */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = secs_to_jiffies(hdev->discov_timeout);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
	hci_dev_unlock(hdev);
}
1567 
/* hci_cmd_sync callback for Set Discoverable: bail out if the pending
 * cmd was cancelled meanwhile, otherwise push the discoverable state to
 * the controller. Also queued with NULL data from discov_off().
 */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	if (!mgmt_pending_listed(hdev, data))
		return -ECANCELED;

	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
1577 
/* MGMT_OP_SET_DISCOVERABLE handler: validate mode (0x00 off, 0x01
 * general, 0x02 limited) and timeout combinations, handle the
 * powered-off and no-change fast paths, then set the flags and queue
 * the HCI update. The timeout is armed in the completion handler.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Discoverable requires at least one enabled transport */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Discoverable and connectable changes must not race each other */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Powered off: only toggle the flag and report, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = secs_to_jiffies(hdev->discov_timeout);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1710 
/* Completion callback for set_connectable_sync: respond to the Set
 * Connectable command and broadcast the new settings, or report the
 * error status on failure.
 */
static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
1738 
/* Powered-off path of Set Connectable: only update the flags (clearing
 * discoverable too when going non-connectable), answer the command and,
 * when something actually changed, refresh scanning and notify.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Non-connectable implies non-discoverable */
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1767 
/* hci_cmd_sync callback for Set Connectable: bail out if the pending
 * cmd was cancelled meanwhile, otherwise push the connectable state to
 * the controller.
 */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	if (!mgmt_pending_listed(hdev, data))
		return -ECANCELED;

	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
1777 
/* MGMT_OP_SET_CONNECTABLE handler: validate the request, take the
 * flag-only path while powered off, reject concurrent discoverable/
 * connectable changes, then update the flags and queue the HCI update.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Connectable requires at least one enabled transport */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Going non-connectable also ends discoverable mode, so
		 * stop its timeout and clear both discoverable flags.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1837 
/* MGMT_OP_SET_BONDABLE handler: toggle the HCI_BONDABLE flag. No HCI
 * command is needed, but a change may affect the advertising address in
 * limited privacy mode, so discoverable data is refreshed.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1875 
/* MGMT_OP_SET_LINK_SECURITY handler: requires BR/EDR. While powered
 * off only the flag is toggled; otherwise HCI_OP_WRITE_AUTH_ENABLE is
 * sent and the command completed from its event handler.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: just track the desired state in the flag */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Already in the requested state: reply immediately */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* On success the pending cmd stays queued and is completed by
	 * the command's event handler; the goto below is a no-op jump
	 * to the immediately following label.
	 */
	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1944 
/* Completion callback for set_ssp_sync: reconcile the HCI_SSP_ENABLED
 * flag with the outcome, answer the command and broadcast new settings
 * when the effective state changed. On error a previously-set enable
 * flag is rolled back before reporting the failure.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;
	u8 enable;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	cp = cmd->param;
	enable = cp->val;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Roll back the optimistic enable from set_ssp_sync() */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			new_settings(hdev, NULL);
		}

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		mgmt_pending_free(cmd);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	settings_rsp(cmd, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
	mgmt_pending_free(cmd);
}
1990 
/* hci_cmd_sync callback for Set SSP: snapshot the requested mode under
 * mgmt_pending_lock and write it to the controller.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode cp;
	bool changed = false;
	int err;

	mutex_lock(&hdev->mgmt_pending_lock);

	/* Make sure cmd still outstanding. */
	if (!__mgmt_pending_listed(hdev, cmd)) {
		mutex_unlock(&hdev->mgmt_pending_lock);
		return -ECANCELED;
	}

	memcpy(&cp, cmd->param, sizeof(cp));

	mutex_unlock(&hdev->mgmt_pending_lock);

	/* Set HCI_SSP_ENABLED up front so the write below already sees
	 * the new state.
	 */
	if (cp.val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp.val);

	/* NOTE(review): on success the just-set flag is cleared again —
	 * presumably so set_ssp_complete() can re-detect the transition
	 * via test_and_set and emit New Settings; confirm against
	 * set_ssp_complete() before changing this.
	 */
	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
2019 
/* MGMT_OP_SET_SSP handler: requires BR/EDR and SSP capability. While
 * powered off only the flag is toggled; otherwise the mode change is
 * queued as a sync operation completed by set_ssp_complete().
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: just track the desired state in the flag */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: reply immediately */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2094 
2095 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2096 {
2097 	bt_dev_dbg(hdev, "sock %p", sk);
2098 
2099 	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2100 				       MGMT_STATUS_NOT_SUPPORTED);
2101 }
2102 
/* Completion callback for set_le_sync: answer the Set LE command and
 * broadcast the new settings, or report the error status on failure.
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, data))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, status);
		goto done;
	}

	settings_rsp(cmd, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

done:
	mgmt_pending_free(cmd);
}
2129 
/* hci_cmd_sync work for the Set LE command: apply the requested LE host
 * support state to the controller and refresh advertising defaults.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode cp;
	u8 val;
	int err;

	/* Snapshot the request parameters while holding mgmt_pending_lock
	 * so the pending command cannot be freed underneath us; bail out
	 * if it has already been unlisted (i.e. cancelled).
	 */
	mutex_lock(&hdev->mgmt_pending_lock);

	if (!__mgmt_pending_listed(hdev, cmd)) {
		mutex_unlock(&hdev->mgmt_pending_lock);
		return -ECANCELED;
	}

	memcpy(&cp, cmd->param, sizeof(cp));
	val = !!cp.val;

	mutex_unlock(&hdev->mgmt_pending_lock);

	if (!val) {
		/* Disabling LE: remove all advertising instances and stop
		 * any advertising that is currently active.
		 */
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2185 
/* Completion callback for MGMT_OP_SET_MESH_RECEIVER: report success or
 * the mapped error status back to the requesting socket.
 */
static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	u8 status = mgmt_status(err);
	struct sock *sk;

	/* Nothing to do if the command was cancelled or already removed */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	sk = cmd->sk;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, cmd->opcode, status);
		goto done;
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);

done:
	mgmt_pending_free(cmd);
}
2207 
/* hci_cmd_sync work for MGMT_OP_SET_MESH_RECEIVER: apply the requested
 * mesh receiver state, scan timing and AD type filters, then refresh
 * passive scanning to pick up the new parameters.
 */
static int set_mesh_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	/* On-stack copy of the request, sized for the maximum number of
	 * AD type filters that fit in hdev->mesh_ad_types.
	 */
	DEFINE_FLEX(struct mgmt_cp_set_mesh, cp, ad_types, num_ad_types,
		    sizeof(hdev->mesh_ad_types));
	size_t len;

	/* Copy the parameters under mgmt_pending_lock so the pending
	 * command cannot be freed while they are being read.
	 */
	mutex_lock(&hdev->mgmt_pending_lock);

	if (!__mgmt_pending_listed(hdev, cmd)) {
		mutex_unlock(&hdev->mgmt_pending_lock);
		return -ECANCELED;
	}

	len = cmd->param_len;
	memcpy(cp, cmd->param, min(__struct_size(cp), len));

	mutex_unlock(&hdev->mgmt_pending_lock);

	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));

	if (cp->enable)
		hci_dev_set_flag(hdev, HCI_MESH);
	else
		hci_dev_clear_flag(hdev, HCI_MESH);

	hdev->le_scan_interval = __le16_to_cpu(cp->period);
	hdev->le_scan_window = __le16_to_cpu(cp->window);

	/* Remaining bytes after the fixed header are the AD type filters */
	len -= sizeof(struct mgmt_cp_set_mesh);

	/* If filters don't fit, forward all adv pkts */
	if (len <= sizeof(hdev->mesh_ad_types))
		memcpy(hdev->mesh_ad_types, cp->ad_types, len);

	hci_update_passive_scan_sync(hdev);
	return 0;
}
2246 
/* MGMT_OP_SET_MESH_RECEIVER handler: validate the request and queue
 * set_mesh_sync() to apply it. Requires LE support and the mesh
 * experimental feature to be enabled.
 */
static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	__u16 period, window;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* enable is a strict boolean */
	if (cp->enable != 0x00 && cp->enable != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Keep allowed ranges in sync with set_scan_params() */
	period = __le16_to_cpu(cp->period);

	if (period < 0x0004 || period > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The scan window must not exceed the scan interval */
	if (window > period)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2302 
/* Completion callback after a mesh transmission has been started: on
 * failure clean up; on success schedule the "send done" work after the
 * expected transmission time.
 */
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	unsigned long mesh_send_interval;
	u8 mgmt_err = mgmt_status(err);

	/* Report any errors here, but don't report completion */

	if (mgmt_err) {
		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
		/* Send Complete Error Code for handle */
		mesh_send_complete(hdev, mesh_tx, false);
		return;
	}

	/* Allow 25ms per requested transmission before declaring it done */
	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
			   mesh_send_interval);
}
2323 
/* hci_cmd_sync work for a mesh transmission: create a dedicated
 * advertising instance carrying the mesh packet and schedule it.
 */
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	/* Mesh uses an instance number one past the controller's
	 * regular advertising sets.
	 */
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	/* No free advertising set available for the mesh packet */
	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	timeout = 1000;
	/* Advertise long enough for the requested number of transmissions
	 * at the maximum advertising interval.
	 */
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other, or
		 * let it go naturally from queue if ADV is already happening
		 */
		instance = 0;
	}

	/* instance != 0 means an instance still needs to be scheduled */
	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}
2377 
2378 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2379 {
2380 	struct mgmt_rp_mesh_read_features *rp = data;
2381 
2382 	if (rp->used_handles >= rp->max_handles)
2383 		return;
2384 
2385 	rp->handles[rp->used_handles++] = mesh_tx->handle;
2386 }
2387 
/* MGMT_OP_MESH_READ_FEATURES handler: report the maximum number of mesh
 * TX handles and the handles currently in use by this socket.
 */
static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	memset(&rp, 0, sizeof(rp));
	rp.index = cpu_to_le16(hdev->id);
	/* Handles are only available while LE is enabled */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	/* Trim the reply to only the used portion of the handles array */
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}
2414 
/* hci_cmd_sync work for MGMT_OP_MESH_SEND_CANCEL: cancel either all of
 * this socket's outstanding mesh transmissions (handle 0) or the single
 * matching one, then complete the mgmt command.
 */
static int send_cancel(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
	struct mgmt_mesh_tx *mesh_tx;

	if (!cancel->handle) {
		/* Handle 0: cancel every pending TX owned by this socket */
		do {
			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, false);
		} while (mesh_tx);
	} else {
		/* Only cancel if the TX belongs to the requesting socket */
		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);

		if (mesh_tx && mesh_tx->sk == cmd->sk)
			mesh_send_complete(hdev, mesh_tx, false);
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
			  0, NULL, 0);
	mgmt_pending_free(cmd);

	return 0;
}
2441 
/* MGMT_OP_MESH_SEND_CANCEL handler: validate preconditions and queue
 * send_cancel() to perform the actual cancellation.
 */
static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Cancelling only makes sense while LE is enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);
	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2475 
/* MGMT_OP_MESH_SEND handler: queue a mesh packet for transmission as
 * advertising data and reply with the assigned TX handle.
 */
static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct mgmt_cp_mesh_send *send = data;
	struct mgmt_rp_mesh_read_features rp;
	bool sending;
	int err = 0;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_NOT_SUPPORTED);
	/* Payload must be non-empty and fit in a legacy advertising PDU
	 * (at most 31 bytes of adv data).
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    len <= MGMT_MESH_SEND_SIZE ||
	    len > (MGMT_MESH_SEND_SIZE + 31))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Count this socket's outstanding handles to enforce the limit */
	memset(&rp, 0, sizeof(rp));
	rp.max_handles = MESH_HANDLES_MAX;

	mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	if (rp.max_handles <= rp.used_handles) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_BUSY);
		goto done;
	}

	/* Only kick off transmission if one is not already in progress;
	 * otherwise the new TX just queues behind the current one.
	 */
	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);

	if (!mesh_tx)
		err = -ENOMEM;
	else if (!sending)
		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
					 mesh_send_start_complete);

	if (err < 0) {
		bt_dev_err(hdev, "Send Mesh Failed %d", err);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_FAILED);

		if (mesh_tx) {
			if (sending)
				mgmt_mesh_remove(mesh_tx);
		}
	} else {
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);

		/* Reply with the 1-byte handle assigned to this TX */
		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
				  &mesh_tx->handle, 1);
	}

done:
	hci_dev_unlock(hdev);
	return err;
}
2536 
/* MGMT_OP_SET_LE handler: enable or disable LE host support. When the
 * device is not powered (or the state already matches) only the stored
 * flags are updated; otherwise set_le_sync() is queued to reconfigure
 * the controller.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* val is a strict boolean */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Powered off, or state already matches: just update the flags
	 * and report, no HCI traffic needed.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Disabling LE also implicitly turns advertising off */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Refuse while a conflicting LE/advertising change is in flight */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2625 
/* hci_cmd_sync work for MGMT_OP_HCI_CMD_SYNC: issue the user-supplied
 * raw HCI command, wait for the requested event (or the default command
 * timeout) and forward the resulting payload to the caller.
 */
static int send_hci_cmd_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_hci_cmd_sync *cp = cmd->param;
	struct sk_buff *skb;

	/* cp->timeout is in seconds; 0 selects the default HCI timeout */
	skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cp->opcode),
				le16_to_cpu(cp->params_len), cp->params,
				cp->event, cp->timeout ?
				secs_to_jiffies(cp->timeout) :
				HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
				mgmt_status(PTR_ERR(skb)));
		goto done;
	}

	/* Return the raw event payload as the command response */
	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC, 0,
			  skb->data, skb->len);

	kfree_skb(skb);

done:
	mgmt_pending_free(cmd);

	return 0;
}
2653 
/* MGMT_OP_HCI_CMD_SYNC handler: validate that the overall length matches
 * the declared parameter length, then queue send_hci_cmd_sync().
 */
static int mgmt_hci_cmd_sync(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 len)
{
	struct mgmt_cp_hci_cmd_sync *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	/* Total length must be exactly header + declared parameter bytes */
	if (len != (offsetof(struct mgmt_cp_hci_cmd_sync, params) +
		    le16_to_cpu(cp->params_len)))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);
	cmd = mgmt_pending_new(sk, MGMT_OP_HCI_CMD_SYNC, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_hci_cmd_sync, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2684 
2685 /* This is a helper function to test for pending mgmt commands that can
2686  * cause CoD or EIR HCI commands. We can only allow one such pending
2687  * mgmt command at a time since otherwise we cannot easily track what
2688  * the current values are, will be, and based on that calculate if a new
2689  * HCI command needs to be sent and if yes with what value.
2690  */
2691 static bool pending_eir_or_class(struct hci_dev *hdev)
2692 {
2693 	struct mgmt_pending_cmd *cmd;
2694 
2695 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2696 		switch (cmd->opcode) {
2697 		case MGMT_OP_ADD_UUID:
2698 		case MGMT_OP_REMOVE_UUID:
2699 		case MGMT_OP_SET_DEV_CLASS:
2700 		case MGMT_OP_SET_POWERED:
2701 			return true;
2702 		}
2703 	}
2704 
2705 	return false;
2706 }
2707 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; used to detect UUIDs that have a shortened
 * 16-bit or 32-bit representation.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2712 
2713 static u8 get_uuid_size(const u8 *uuid)
2714 {
2715 	u32 val;
2716 
2717 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2718 		return 128;
2719 
2720 	val = get_unaligned_le32(&uuid[12]);
2721 	if (val > 0xffff)
2722 		return 32;
2723 
2724 	return 16;
2725 }
2726 
/* Shared completion callback for the UUID/class commands: reply with
 * the (3-byte) current Class of Device and release the pending command.
 */
static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
			  mgmt_status(err), hdev->dev_class, 3);

	mgmt_pending_free(cmd);
}
2738 
/* hci_cmd_sync work for MGMT_OP_ADD_UUID: refresh the Class of Device
 * first and, if that succeeded, the EIR data as well.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	if (err)
		return err;

	return hci_update_eir_sync(hdev);
}
2749 
/* MGMT_OP_ADD_UUID handler: append the UUID to the device's service
 * list and queue a Class of Device / EIR refresh. The completion
 * callback (mgmt_class_complete) sends the response.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc_obj(*uuid);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	/* Remember the shortest representation for EIR encoding */
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* MGMT_OP_ADD_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2799 
2800 static bool enable_service_cache(struct hci_dev *hdev)
2801 {
2802 	if (!hdev_is_powered(hdev))
2803 		return false;
2804 
2805 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2806 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2807 				   CACHE_TIMEOUT);
2808 		return true;
2809 	}
2810 
2811 	return false;
2812 }
2813 
/* hci_cmd_sync work for MGMT_OP_REMOVE_UUID: refresh the Class of
 * Device and, on success, the EIR data.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_update_class_sync(hdev);
	if (!err)
		err = hci_update_eir_sync(hdev);

	return err;
}
2824 
/* MGMT_OP_REMOVE_UUID handler: remove one UUID (or all of them when the
 * all-zero wildcard is given) from the service list and refresh the
 * Class of Device / EIR data.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a wildcard meaning "remove everything" */
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache was armed, the flush work will do
		 * the CoD/EIR update later; complete immediately.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* Remove every entry matching the given UUID */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_REMOVE_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2895 
/* hci_cmd_sync work for MGMT_OP_SET_DEV_CLASS: flush the service cache
 * (refreshing EIR) if it was active, then update the Class of Device.
 */
static int set_class_sync(struct hci_dev *hdev, void *data)
{
	int err = 0;

	/* An armed service cache means the EIR is stale; cancel the
	 * delayed flush and refresh the EIR right away.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		cancel_delayed_work_sync(&hdev->service_cache);
		err = hci_update_eir_sync(hdev);
	}

	if (err)
		return err;

	return hci_update_class_sync(hdev);
}
2910 
/* MGMT_OP_SET_DEV_CLASS handler: store the requested major/minor device
 * class and, when powered, queue set_class_sync() to push it to the
 * controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Reserved bits: low 2 bits of minor and high 3 bits of major
	 * must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: the stored class takes effect on power on */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_SET_DEV_CLASS don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2965 
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the stored BR/EDR link keys
 * with the supplied list, after validating the length and each entry.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound on key_count so the expected length below cannot
	 * overflow u16.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* Payload length must exactly match the declared key count */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	hci_dev_lock(hdev);

	/* The new list fully replaces any previously stored keys */
	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	/* Store each valid key; invalid or blocked entries are skipped
	 * with a warning rather than failing the whole command.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		if (key->addr.type != BDADDR_BREDR) {
			bt_dev_warn(hdev,
				    "Invalid link address type %u for %pMR",
				    key->addr.type, &key->addr.bdaddr);
			continue;
		}

		if (key->type > 0x08) {
			bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
				    key->type, &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
3058 
3059 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
3060 			   u8 addr_type, struct sock *skip_sk)
3061 {
3062 	struct mgmt_ev_device_unpaired ev;
3063 
3064 	bacpy(&ev.addr.bdaddr, bdaddr);
3065 	ev.addr.type = addr_type;
3066 
3067 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
3068 			  skip_sk);
3069 }
3070 
/* Completion callback for MGMT_OP_UNPAIR_DEVICE: announce the unpaired
 * device on success, then complete and free the pending command.
 */
static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	if (!err)
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	/* cmd_complete (addr_cmd_complete, set by unpair_device) sends
	 * the mgmt response before the command is released.
	 */
	cmd->cmd_complete(cmd, err);
	mgmt_pending_free(cmd);
}
3082 
/* hci_cmd_sync work for MGMT_OP_UNPAIR_DEVICE: if a connection to the
 * address still exists, terminate it; returns 0 either way.
 */
static int unpair_device_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct hci_conn *conn;

	/* Look up the connection on the matching transport */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn)
		return 0;

	/* Disregard any possible error since the likes of hci_abort_conn_sync
	 * will clean up the connection no matter the error.
	 */
	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

	return 0;
}
3106 
/* MGMT_OP_UNPAIR_DEVICE handler: remove all keys for the given address
 * (link key for BR/EDR; LTK/IRK and pairing state for LE) and, when
 * requested and connected, disconnect the device asynchronously.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* The reply always echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	/* disconnect is a strict boolean */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Disconnection requested: complete asynchronously via
	 * unpair_device_sync()/unpair_device_complete().
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3235 
3236 static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
3237 {
3238 	struct mgmt_pending_cmd *cmd = data;
3239 
3240 	cmd->cmd_complete(cmd, mgmt_status(err));
3241 	mgmt_pending_free(cmd);
3242 }
3243 
3244 static int disconnect_sync(struct hci_dev *hdev, void *data)
3245 {
3246 	struct mgmt_pending_cmd *cmd = data;
3247 	struct mgmt_cp_disconnect *cp = cmd->param;
3248 	struct hci_conn *conn;
3249 
3250 	if (cp->addr.type == BDADDR_BREDR)
3251 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3252 					       &cp->addr.bdaddr);
3253 	else
3254 		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3255 					       le_addr_type(cp->addr.type));
3256 
3257 	if (!conn)
3258 		return -ENOTCONN;
3259 
3260 	/* Disregard any possible error since the likes of hci_abort_conn_sync
3261 	 * will clean up the connection no matter the error.
3262 	 */
3263 	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3264 
3265 	return 0;
3266 }
3267 
/* MGMT_OP_DISCONNECT handler: queue termination of the BR/EDR or LE
 * link identified by cp->addr. The actual disconnect is performed
 * asynchronously by disconnect_sync() on the cmd_sync work queue.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the target address, even on error */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* disconnect_complete() sends the response and frees cmd once
	 * the queued work has run.
	 */
	err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
				 disconnect_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3313 
3314 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3315 {
3316 	switch (link_type) {
3317 	case CIS_LINK:
3318 	case BIS_LINK:
3319 	case PA_LINK:
3320 	case LE_LINK:
3321 		switch (addr_type) {
3322 		case ADDR_LE_DEV_PUBLIC:
3323 			return BDADDR_LE_PUBLIC;
3324 
3325 		default:
3326 			/* Fallback to LE Random address type */
3327 			return BDADDR_LE_RANDOM;
3328 		}
3329 
3330 	default:
3331 		/* Fallback to BR/EDR type */
3332 		return BDADDR_BREDR;
3333 	}
3334 }
3335 
/* MGMT_OP_GET_CONNECTIONS handler: report the addresses of all
 * connections that have been announced to the management interface.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count mgmt-visible connections so the reply with
	 * its flexible addr[] array can be sized.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc_flex(*rp, addr, i);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in the addresses. SCO/eSCO entries are
	 * skipped without advancing i, so any slot written for them is
	 * either overwritten by a later entry or excluded by the final
	 * count below.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3389 
/* Send HCI_OP_PIN_CODE_NEG_REPLY for the given address, tracking it as
 * a pending mgmt command so the HCI completion is routed back to sk.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
3410 
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to
 * the controller for the ACL connection with the given address. If a
 * 16-byte PIN is required for high security but not provided, a
 * negative reply is sent instead.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN; reject anything
	 * shorter by sending a negative reply to the controller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3472 
/* MGMT_OP_SET_IO_CAPABILITY handler: store the IO capability used for
 * future pairing attempts. Values above SMP_IO_KEYBOARD_DISPLAY are
 * rejected as invalid.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
3495 
3496 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3497 {
3498 	struct hci_dev *hdev = conn->hdev;
3499 	struct mgmt_pending_cmd *cmd;
3500 
3501 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3502 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3503 			continue;
3504 
3505 		if (cmd->user_data != conn)
3506 			continue;
3507 
3508 		return cmd;
3509 	}
3510 
3511 	return NULL;
3512 }
3513 
/* Finalize a PAIR_DEVICE command: send the reply to the caller, detach
 * the pairing callbacks from the connection and drop the references
 * taken when the command was set up.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Matches the hci_conn_get() done when cmd->user_data was set */
	hci_conn_put(conn);

	return err;
}
3542 
3543 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3544 {
3545 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3546 	struct mgmt_pending_cmd *cmd;
3547 
3548 	cmd = find_pairing(conn);
3549 	if (cmd) {
3550 		cmd->cmd_complete(cmd, status);
3551 		mgmt_pending_remove(cmd);
3552 	}
3553 }
3554 
3555 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3556 {
3557 	struct mgmt_pending_cmd *cmd;
3558 
3559 	BT_DBG("status %u", status);
3560 
3561 	cmd = find_pairing(conn);
3562 	if (!cmd) {
3563 		BT_DBG("Unable to find a pending command");
3564 		return;
3565 	}
3566 
3567 	cmd->cmd_complete(cmd, mgmt_status(status));
3568 	mgmt_pending_remove(cmd);
3569 }
3570 
3571 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3572 {
3573 	struct mgmt_pending_cmd *cmd;
3574 
3575 	BT_DBG("status %u", status);
3576 
3577 	if (!status)
3578 		return;
3579 
3580 	cmd = find_pairing(conn);
3581 	if (!cmd) {
3582 		BT_DBG("Unable to find a pending command");
3583 		return;
3584 	}
3585 
3586 	cmd->cmd_complete(cmd, mgmt_status(status));
3587 	mgmt_pending_remove(cmd);
3588 }
3589 
/* MGMT_OP_PAIR_DEVICE handler: initiate pairing with the given remote
 * device. Creates a BR/EDR or LE connection as needed, installs
 * pairing callbacks on it and tracks the operation as a pending mgmt
 * command which is completed via pairing_complete().
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the target address, even on error */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE,
				       HCI_ACL_CONN_TIMEOUT);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the peripheral preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			err = -EIO;
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Map the connect error to the closest mgmt status */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* A non-NULL connect_cfm_cb means another pairing is already
	 * using this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Reference released by pairing_complete() via hci_conn_put() */
	cmd->user_data = hci_conn_get(conn);

	/* If the link is already up and secure enough, complete the
	 * pairing immediately.
	 */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3725 
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an in-progress PAIR_DEVICE
 * for the given address, remove any keys created so far and tear down
 * the link if it was only established for pairing.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The cancel must target the same device the pending pairing
	 * is for.
	 */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Complete the PAIR_DEVICE command as cancelled before replying
	 * to the cancel itself.
	 */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3782 
/* Common implementation for the user pairing response commands
 * (PIN code / user confirm / passkey replies and their negatives).
 * LE responses are routed through SMP; BR/EDR responses are sent to
 * the controller as the given hci_op.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses are handled by SMP and completed
	 * synchronously here.
	 */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3853 
3854 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3855 			      void *data, u16 len)
3856 {
3857 	struct mgmt_cp_pin_code_neg_reply *cp = data;
3858 
3859 	bt_dev_dbg(hdev, "sock %p", sk);
3860 
3861 	return user_pairing_resp(sk, hdev, &cp->addr,
3862 				MGMT_OP_PIN_CODE_NEG_REPLY,
3863 				HCI_OP_PIN_CODE_NEG_REPLY, 0);
3864 }
3865 
3866 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3867 			      u16 len)
3868 {
3869 	struct mgmt_cp_user_confirm_reply *cp = data;
3870 
3871 	bt_dev_dbg(hdev, "sock %p", sk);
3872 
3873 	if (len != sizeof(*cp))
3874 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3875 				       MGMT_STATUS_INVALID_PARAMS);
3876 
3877 	return user_pairing_resp(sk, hdev, &cp->addr,
3878 				 MGMT_OP_USER_CONFIRM_REPLY,
3879 				 HCI_OP_USER_CONFIRM_REPLY, 0);
3880 }
3881 
3882 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3883 				  void *data, u16 len)
3884 {
3885 	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3886 
3887 	bt_dev_dbg(hdev, "sock %p", sk);
3888 
3889 	return user_pairing_resp(sk, hdev, &cp->addr,
3890 				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3891 				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3892 }
3893 
3894 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3895 			      u16 len)
3896 {
3897 	struct mgmt_cp_user_passkey_reply *cp = data;
3898 
3899 	bt_dev_dbg(hdev, "sock %p", sk);
3900 
3901 	return user_pairing_resp(sk, hdev, &cp->addr,
3902 				 MGMT_OP_USER_PASSKEY_REPLY,
3903 				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3904 }
3905 
3906 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3907 				  void *data, u16 len)
3908 {
3909 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3910 
3911 	bt_dev_dbg(hdev, "sock %p", sk);
3912 
3913 	return user_pairing_resp(sk, hdev, &cp->addr,
3914 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3915 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3916 }
3917 
/* Expire the current advertising instance if it carries any of the
 * given flags (so stale data such as an old local name or appearance
 * stops being advertised) and schedule the next instance, if any.
 */
static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return 0;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return 0;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return 0;

	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);

	return 0;
}
3940 
/* cmd_sync work: expire advertising instances that embed the local
 * name after it has changed.
 */
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
3945 
/* Completion callback for MGMT_OP_SET_LOCAL_NAME: report the outcome
 * to the caller and, on success while advertising, refresh any
 * advertising instance that carries the local name.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Bail out if the command was cancelled or is no longer on the
	 * pending list (someone else already completed it).
	 */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_free(cmd);
}
3972 
/* cmd_sync work for MGMT_OP_SET_LOCAL_NAME: push the new name to the
 * controller (BR/EDR name + EIR, and LE scan response data when
 * advertising is enabled).
 */
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name cp;

	/* Copy the parameters under mgmt_pending_lock: cmd may be freed
	 * concurrently once it is no longer listed.
	 */
	mutex_lock(&hdev->mgmt_pending_lock);

	if (!__mgmt_pending_listed(hdev, cmd)) {
		mutex_unlock(&hdev->mgmt_pending_lock);
		return -ECANCELED;
	}

	memcpy(&cp, cmd->param, sizeof(cp));

	mutex_unlock(&hdev->mgmt_pending_lock);

	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev, cp.name);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
4002 
/* MGMT_OP_SET_LOCAL_NAME handler: update the device's long and short
 * names. If the device is not powered the names are only stored and a
 * LOCAL_NAME_CHANGED event is emitted; otherwise the update is pushed
 * to the controller via set_name_sync().
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name is never sent to the controller, so it can be
	 * stored unconditionally.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	/* Store the name only after the update was queued successfully */
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
4065 
/* cmd_sync work: expire advertising instances that embed the
 * appearance value after it has changed.
 */
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
4070 
/* MGMT_OP_SET_APPEARANCE handler: store the LE appearance value and,
 * when it actually changed while advertising, refresh any advertising
 * instance that carries it.
 */
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != appearance) {
		hdev->appearance = appearance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}
4105 
4106 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4107 				 void *data, u16 len)
4108 {
4109 	struct mgmt_rp_get_phy_configuration rp;
4110 
4111 	bt_dev_dbg(hdev, "sock %p", sk);
4112 
4113 	hci_dev_lock(hdev);
4114 
4115 	memset(&rp, 0, sizeof(rp));
4116 
4117 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
4118 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
4119 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
4120 
4121 	hci_dev_unlock(hdev);
4122 
4123 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
4124 				 &rp, sizeof(rp));
4125 }
4126 
/* Emit MGMT_EV_PHY_CONFIGURATION_CHANGED to all mgmt sockets except
 * @skip (typically the one that triggered the change).
 */
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	/* memset zeroes padding too, so no stack data leaks to userspace */
	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}
4138 
/* Completion callback for the LE Set Default PHY command: derive the
 * final status from err and the reply skb stored by
 * set_default_phy_sync(), report it and broadcast the change event on
 * success.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb;
	u8 status = mgmt_status(err);

	skb = cmd->skb;

	/* Even when err is 0, the skb itself can carry a failure: it
	 * may be missing, be an ERR_PTR, or hold a non-zero HCI status
	 * in its first byte.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
4174 
4175 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4176 {
4177 	struct mgmt_pending_cmd *cmd = data;
4178 	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4179 	struct hci_cp_le_set_default_phy cp_phy;
4180 	u32 selected_phys;
4181 
4182 	selected_phys = __le32_to_cpu(cp->selected_phys);
4183 
4184 	memset(&cp_phy, 0, sizeof(cp_phy));
4185 
4186 	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4187 		cp_phy.all_phys |= 0x01;
4188 
4189 	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4190 		cp_phy.all_phys |= 0x02;
4191 
4192 	if (selected_phys & MGMT_PHY_LE_1M_TX)
4193 		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4194 
4195 	if (selected_phys & MGMT_PHY_LE_2M_TX)
4196 		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4197 
4198 	if (selected_phys & MGMT_PHY_LE_CODED_TX)
4199 		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4200 
4201 	if (selected_phys & MGMT_PHY_LE_1M_RX)
4202 		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4203 
4204 	if (selected_phys & MGMT_PHY_LE_2M_RX)
4205 		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4206 
4207 	if (selected_phys & MGMT_PHY_LE_CODED_RX)
4208 		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4209 
4210 	cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4211 				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4212 
4213 	return 0;
4214 }
4215 
/* MGMT_OP_SET_PHY_CONFIGURATION handler: apply the requested PHY
 * selection. BR/EDR selections are mapped onto the ACL packet-type
 * mask directly; LE selections are forwarded to the controller via
 * set_default_phy_sync().
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Selecting an unsupported PHY is invalid */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Non-configurable PHYs must always remain selected */
	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Map the BR/EDR selections to the ACL packet-type mask. Note
	 * that the EDR (2M/3M) bits are inverted: setting HCI_2DHx /
	 * HCI_3DHx in pkt_type DISABLES that packet type, so selecting
	 * a PHY clears the corresponding bit.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If the LE part of the selection is unchanged, there is no HCI
	 * command to send — just report the (possible) BR/EDR change.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4344 
/* MGMT_OP_SET_BLOCKED_KEYS: replace the list of blocked (revoked) keys
 * with the list supplied by userspace.
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	/* Largest key_count for which expected_len below cannot exceed U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must match the advertised key_count exactly */
	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* The new list fully replaces any previously blocked keys */
	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc_obj(*b);

		if (!b) {
			/* Keys added so far stay on the list; the failure is
			 * reported through the command status instead.
			 */
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	/* Note: err holds an MGMT status code here, not a negative errno */
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				err, NULL, 0);
}
4393 
4394 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4395 			       void *data, u16 len)
4396 {
4397 	struct mgmt_mode *cp = data;
4398 	int err;
4399 	bool changed = false;
4400 
4401 	bt_dev_dbg(hdev, "sock %p", sk);
4402 
4403 	if (!hci_test_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED))
4404 		return mgmt_cmd_status(sk, hdev->id,
4405 				       MGMT_OP_SET_WIDEBAND_SPEECH,
4406 				       MGMT_STATUS_NOT_SUPPORTED);
4407 
4408 	if (cp->val != 0x00 && cp->val != 0x01)
4409 		return mgmt_cmd_status(sk, hdev->id,
4410 				       MGMT_OP_SET_WIDEBAND_SPEECH,
4411 				       MGMT_STATUS_INVALID_PARAMS);
4412 
4413 	hci_dev_lock(hdev);
4414 
4415 	if (hdev_is_powered(hdev) &&
4416 	    !!cp->val != hci_dev_test_flag(hdev,
4417 					   HCI_WIDEBAND_SPEECH_ENABLED)) {
4418 		err = mgmt_cmd_status(sk, hdev->id,
4419 				      MGMT_OP_SET_WIDEBAND_SPEECH,
4420 				      MGMT_STATUS_REJECTED);
4421 		goto unlock;
4422 	}
4423 
4424 	if (cp->val)
4425 		changed = !hci_dev_test_and_set_flag(hdev,
4426 						   HCI_WIDEBAND_SPEECH_ENABLED);
4427 	else
4428 		changed = hci_dev_test_and_clear_flag(hdev,
4429 						   HCI_WIDEBAND_SPEECH_ENABLED);
4430 
4431 	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4432 	if (err < 0)
4433 		goto unlock;
4434 
4435 	if (changed)
4436 		err = new_settings(hdev, sk);
4437 
4438 unlock:
4439 	hci_dev_unlock(hdev);
4440 	return err;
4441 }
4442 
/* MGMT_OP_READ_CONTROLLER_CAP: report controller security capabilities
 * as a sequence of EIR-style TLV entries in the response.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	/* Sized to hold the response header plus all TLVs appended below */
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		/* Single-byte copies avoid assuming the signedness/width of
		 * the min/max tx power fields.
		 */
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	/* Reply length is the fixed header plus the appended TLV bytes */
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
4509 
/* Experimental feature UUIDs. The byte arrays store the UUID in reversed
 * (little-endian) byte order relative to the string form given in each
 * comment.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};
4547 
/* MGMT_OP_READ_EXP_FEATURES_INFO: list the experimental features that
 * apply to the given index (or the global index when hdev is NULL),
 * each with a flags word whose BIT(0) reports the enabled state.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_exp_features_info *rp;
	size_t len;
	u16 idx = 0;
	u32 flags;
	int status;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enough space for 7 features */
	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
	rp = kzalloc(len, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Debug feature only exists on the non-controller index */
	flags = bt_dbg_get() ? BIT(0) : 0;

	memcpy(rp->features[idx].uuid, debug_uuid, 16);
	rp->features[idx].flags = cpu_to_le32(flags);
	idx++;
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	/* Quality report is advertised when either AOSP extensions or a
	 * driver callback provide it.
	 */
	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (IS_ENABLED(CONFIG_BT_LE)) {
		flags = iso_inited() ? BIT(0) : 0;
		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && lmp_le_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	/* Each feature entry is 20 bytes: 16 byte UUID plus 4 byte flags */
	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				   MGMT_OP_READ_EXP_FEATURES_INFO,
				   0, rp, sizeof(*rp) + (20 * idx));

	kfree(rp);
	return status;
}
4639 
4640 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4641 			       bool enabled, struct sock *skip)
4642 {
4643 	struct mgmt_ev_exp_feature_changed ev;
4644 
4645 	memset(&ev, 0, sizeof(ev));
4646 	memcpy(ev.uuid, uuid, 16);
4647 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4648 
4649 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4650 				  &ev, sizeof(ev),
4651 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4652 }
4653 
/* Build one UUID-to-handler entry for the experimental feature table */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4659 
/* The zero key uuid is special. Multiple exp features are set through it. */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	/* Respond with the zero UUID and all feature flags cleared */
	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* On the non-controller index, disable the debug feature and
	 * notify listeners if it was previously enabled.
	 */
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4686 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Experimental feature handler toggling the global bt_dbg debug output;
 * only valid on the non-controller index.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Record whether the setting actually flips before applying it */
	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* hdev is NULL here, so the event goes out on the global index */
	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif
4733 
/* Experimental feature handler toggling HCI_MESH_EXPERIMENTAL for one
 * controller; disabling it also clears the HCI_MESH flag.
 */
static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_MESH_EXPERIMENTAL);
	} else {
		/* Turning the experimental feature off also turns mesh off */
		hci_dev_clear_flag(hdev, HCI_MESH);
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_MESH_EXPERIMENTAL);
	}

	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);

	return err;
}
4784 
/* Experimental feature handler toggling quality report generation via
 * either the driver callback or the AOSP vendor extension.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Serialize against other HCI request processing while the
	 * vendor/driver call below is in flight.
	 */
	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		/* A driver-provided callback takes precedence over AOSP */
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		/* Only update the flag once the controller accepted it */
		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4858 
4859 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4860 				  struct mgmt_cp_set_exp_feature *cp,
4861 				  u16 data_len)
4862 {
4863 	bool val, changed;
4864 	int err;
4865 	struct mgmt_rp_set_exp_feature rp;
4866 
4867 	/* Command requires to use a valid controller index */
4868 	if (!hdev)
4869 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4870 				       MGMT_OP_SET_EXP_FEATURE,
4871 				       MGMT_STATUS_INVALID_INDEX);
4872 
4873 	/* Parameters are limited to a single octet */
4874 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4875 		return mgmt_cmd_status(sk, hdev->id,
4876 				       MGMT_OP_SET_EXP_FEATURE,
4877 				       MGMT_STATUS_INVALID_PARAMS);
4878 
4879 	/* Only boolean on/off is supported */
4880 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4881 		return mgmt_cmd_status(sk, hdev->id,
4882 				       MGMT_OP_SET_EXP_FEATURE,
4883 				       MGMT_STATUS_INVALID_PARAMS);
4884 
4885 	val = !!cp->param[0];
4886 	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4887 
4888 	if (!hdev->get_data_path_id) {
4889 		return mgmt_cmd_status(sk, hdev->id,
4890 				       MGMT_OP_SET_EXP_FEATURE,
4891 				       MGMT_STATUS_NOT_SUPPORTED);
4892 	}
4893 
4894 	if (changed) {
4895 		if (val)
4896 			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4897 		else
4898 			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4899 	}
4900 
4901 	bt_dev_info(hdev, "offload codecs enable %d changed %d",
4902 		    val, changed);
4903 
4904 	memcpy(rp.uuid, offload_codecs_uuid, 16);
4905 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4906 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4907 	err = mgmt_cmd_complete(sk, hdev->id,
4908 				MGMT_OP_SET_EXP_FEATURE, 0,
4909 				&rp, sizeof(rp));
4910 
4911 	if (changed)
4912 		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4913 
4914 	return err;
4915 }
4916 
/* Experimental feature handler toggling HCI_LE_SIMULTANEOUS_ROLES on
 * controllers that support simultaneous central and peripheral roles.
 */
static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));

	/* The controller must report the needed LE states */
	if (!hci_dev_le_state_simultaneous(hdev)) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);

	return err;
}
4974 
#ifdef CONFIG_BT_LE
/* Experimental feature handler toggling the ISO socket transport.
 * Only valid on the non-controller (global) index.
 */
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed = false;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* !! normalization for consistency with the other exp handlers */
	val = !!cp->param[0];
	if (val)
		err = iso_init();
	else
		err = iso_exit();

	/* Only signal a change when (de)registration actually succeeded */
	if (!err)
		changed = true;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* hdev is NULL here, so the event goes out on the global index */
	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, val, sk);

	return err;
}
#endif
5025 
/* Table mapping experimental feature UUIDs to their set handlers;
 * terminated by a NULL-uuid sentinel entry.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
5046 
5047 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
5048 			   void *data, u16 data_len)
5049 {
5050 	struct mgmt_cp_set_exp_feature *cp = data;
5051 	size_t i = 0;
5052 
5053 	bt_dev_dbg(hdev, "sock %p", sk);
5054 
5055 	for (i = 0; exp_features[i].uuid; i++) {
5056 		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
5057 			return exp_features[i].set_func(sk, hdev, cp, data_len);
5058 	}
5059 
5060 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
5061 			       MGMT_OP_SET_EXP_FEATURE,
5062 			       MGMT_STATUS_NOT_SUPPORTED);
5063 }
5064 
/* MGMT_OP_GET_DEVICE_FLAGS: report the supported and current connection
 * flags for a known BR/EDR accept-list entry or LE connection parameter
 * entry.
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	/* Unknown devices fall through with INVALID_PARAMS and a zeroed rp */
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	supported_flags = hdev->conn_flags;

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (!params)
			goto done;

		current_flags = params->flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				&rp, sizeof(rp));
}
5115 
5116 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5117 				 bdaddr_t *bdaddr, u8 bdaddr_type,
5118 				 u32 supported_flags, u32 current_flags)
5119 {
5120 	struct mgmt_ev_device_flags_changed ev;
5121 
5122 	bacpy(&ev.addr.bdaddr, bdaddr);
5123 	ev.addr.type = bdaddr_type;
5124 	ev.supported_flags = cpu_to_le32(supported_flags);
5125 	ev.current_flags = cpu_to_le32(current_flags);
5126 
5127 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5128 }
5129 
5130 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5131 {
5132 	struct hci_conn *conn;
5133 
5134 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5135 	if (!conn)
5136 		return false;
5137 
5138 	if (conn->dst_type != type)
5139 		return false;
5140 
5141 	if (conn->state != BT_CONNECTED)
5142 		return false;
5143 
5144 	return true;
5145 }
5146 
/* This function requires the caller holds hdev->lock.
 *
 * Look up (or create) the connection parameters for the given address
 * and move them onto the pending-connection or pending-report list that
 * matches the requested auto_connect policy. Returns NULL if the
 * parameters could not be allocated.
 */
static struct hci_conn_params *hci_conn_params_set(struct hci_dev *hdev,
						   bdaddr_t *addr, u8 addr_type,
						   u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return NULL;

	/* Nothing to do when the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return params;

	/* Drop from any pending list before re-filing below */
	hci_pend_le_list_del_init(params);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-progress explicit connect takes precedence over
		 * passive reporting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		else
			hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connect if there is no active link already */
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return params;
}
5192 
/* MGMT_OP_SET_DEVICE_FLAGS: update the connection flags of a known
 * BR/EDR accept-list entry or LE connection parameter entry, creating
 * the LE entry on demand. Emits Device Flags Changed on success.
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* Unlocked fast-path check; conn_flags can change concurrently,
	 * so the flags are validated again below under hdev->lock.
	 */
	supported_flags = hdev->conn_flags;

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		/* Create a new hci_conn_params if it doesn't exist */
		params = hci_conn_params_set(hdev, &cp->addr.bdaddr,
					     le_addr_type(cp->addr.type),
					     HCI_AUTO_CONN_DISABLED);
		if (!params) {
			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
				    &cp->addr.bdaddr,
				    le_addr_type(cp->addr.type));
			goto unlock;
		}
	}

	/* Re-read and re-validate under the lock */
	supported_flags = hdev->conn_flags;

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	WRITE_ONCE(params->flags, current_flags);
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}
5276 
5277 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5278 				   u16 handle)
5279 {
5280 	struct mgmt_ev_adv_monitor_added ev;
5281 
5282 	ev.monitor_handle = cpu_to_le16(handle);
5283 
5284 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5285 }
5286 
5287 static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
5288 				     __le16 handle)
5289 {
5290 	struct mgmt_ev_adv_monitor_removed ev;
5291 
5292 	ev.monitor_handle = handle;
5293 
5294 	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
5295 }
5296 
/* MGMT_OP_READ_ADV_MONITOR_FEATURES: report supported/enabled monitor
 * features, the handle limits and the list of registered handles.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	/* NOTE(review): assumes the number of registered monitors never
	 * exceeds HCI_MAX_ADV_MONITOR_NUM_HANDLES — presumably enforced
	 * when monitors are added; verify at the registration path.
	 */
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	/* Snapshot the registered monitor handles under the lock */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
5345 
/* hci_cmd_sync completion callback for the Add Adv Patterns Monitor (RSSI)
 * commands: sends the mgmt reply, and on success registers the monitor and
 * refreshes passive scanning.
 */
static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor;

	/* This is likely the result of hdev being closed and mgmt_index_removed
	 * is attempting to clean up any pending command so
	 * hci_adv_monitors_clear is about to be called which will take care of
	 * freeing the adv_monitor instances.
	 */
	if (status == -ECANCELED && !mgmt_pending_valid(hdev, cmd))
		return;

	/* The monitor was attached in __add_adv_patterns_monitor() */
	monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		/* Announce to other mgmt sockets and account the monitor */
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5383 
/* hci_cmd_sync work for adding an advertisement monitor.  Re-validates that
 * the pending command is still listed (under mgmt_pending_lock) before
 * dereferencing cmd->user_data, since the command may have been cancelled
 * between queueing and execution.
 */
static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *mon;

	mutex_lock(&hdev->mgmt_pending_lock);

	if (!__mgmt_pending_listed(hdev, cmd)) {
		mutex_unlock(&hdev->mgmt_pending_lock);
		return -ECANCELED;
	}

	mon = cmd->user_data;

	mutex_unlock(&hdev->mgmt_pending_lock);

	return hci_add_adv_monitor(hdev, mon);
}
5402 
/* Common tail for the Add Adv Patterns Monitor handlers.  Takes ownership of
 * @m: on any failure (including a non-zero incoming @status) it frees the
 * monitor and replies with an error; on success the monitor is handed to the
 * hci_cmd_sync machinery via cmd->user_data.
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	/* Caller already failed (parse/alloc error); just report it */
	if (status)
		goto unlock;

	/* Only one monitor-related operation may be in flight at a time */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	/* Error path owns the monitor and must release it */
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
5449 
5450 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5451 				   struct mgmt_adv_rssi_thresholds *rssi)
5452 {
5453 	if (rssi) {
5454 		m->rssi.low_threshold = rssi->low_threshold;
5455 		m->rssi.low_threshold_timeout =
5456 		    __le16_to_cpu(rssi->low_threshold_timeout);
5457 		m->rssi.high_threshold = rssi->high_threshold;
5458 		m->rssi.high_threshold_timeout =
5459 		    __le16_to_cpu(rssi->high_threshold_timeout);
5460 		m->rssi.sampling_period = rssi->sampling_period;
5461 	} else {
5462 		/* Default values. These numbers are the least constricting
5463 		 * parameters for MSFT API to work, so it behaves as if there
5464 		 * are no rssi parameter to consider. May need to be changed
5465 		 * if other API are to be supported.
5466 		 */
5467 		m->rssi.low_threshold = -127;
5468 		m->rssi.low_threshold_timeout = 60;
5469 		m->rssi.high_threshold = -127;
5470 		m->rssi.high_threshold_timeout = 0;
5471 		m->rssi.sampling_period = 0;
5472 	}
5473 }
5474 
5475 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5476 				    struct mgmt_adv_pattern *patterns)
5477 {
5478 	u8 offset = 0, length = 0;
5479 	struct adv_pattern *p = NULL;
5480 	int i;
5481 
5482 	for (i = 0; i < pattern_count; i++) {
5483 		offset = patterns[i].offset;
5484 		length = patterns[i].length;
5485 		if (offset >= HCI_MAX_AD_LENGTH ||
5486 		    length > HCI_MAX_AD_LENGTH ||
5487 		    (offset + length) > HCI_MAX_AD_LENGTH)
5488 			return MGMT_STATUS_INVALID_PARAMS;
5489 
5490 		p = kmalloc_obj(*p);
5491 		if (!p)
5492 			return MGMT_STATUS_NO_RESOURCES;
5493 
5494 		p->ad_type = patterns[i].ad_type;
5495 		p->offset = patterns[i].offset;
5496 		p->length = patterns[i].length;
5497 		memcpy(p->value, patterns[i].value, p->length);
5498 
5499 		INIT_LIST_HEAD(&p->list);
5500 		list_add(&p->list, &m->patterns);
5501 	}
5502 
5503 	return MGMT_STATUS_SUCCESS;
5504 }
5505 
5506 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5507 				    void *data, u16 len)
5508 {
5509 	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5510 	struct adv_monitor *m = NULL;
5511 	u8 status = MGMT_STATUS_SUCCESS;
5512 	size_t expected_size = sizeof(*cp);
5513 
5514 	BT_DBG("request for %s", hdev->name);
5515 
5516 	if (len <= sizeof(*cp)) {
5517 		status = MGMT_STATUS_INVALID_PARAMS;
5518 		goto done;
5519 	}
5520 
5521 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5522 	if (len != expected_size) {
5523 		status = MGMT_STATUS_INVALID_PARAMS;
5524 		goto done;
5525 	}
5526 
5527 	m = kzalloc_obj(*m);
5528 	if (!m) {
5529 		status = MGMT_STATUS_NO_RESOURCES;
5530 		goto done;
5531 	}
5532 
5533 	INIT_LIST_HEAD(&m->patterns);
5534 
5535 	parse_adv_monitor_rssi(m, NULL);
5536 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5537 
5538 done:
5539 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5540 					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5541 }
5542 
5543 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5544 					 void *data, u16 len)
5545 {
5546 	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5547 	struct adv_monitor *m = NULL;
5548 	u8 status = MGMT_STATUS_SUCCESS;
5549 	size_t expected_size = sizeof(*cp);
5550 
5551 	BT_DBG("request for %s", hdev->name);
5552 
5553 	if (len <= sizeof(*cp)) {
5554 		status = MGMT_STATUS_INVALID_PARAMS;
5555 		goto done;
5556 	}
5557 
5558 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5559 	if (len != expected_size) {
5560 		status = MGMT_STATUS_INVALID_PARAMS;
5561 		goto done;
5562 	}
5563 
5564 	m = kzalloc_obj(*m);
5565 	if (!m) {
5566 		status = MGMT_STATUS_NO_RESOURCES;
5567 		goto done;
5568 	}
5569 
5570 	INIT_LIST_HEAD(&m->patterns);
5571 
5572 	parse_adv_monitor_rssi(m, &cp->rssi);
5573 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5574 
5575 done:
5576 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5577 					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5578 }
5579 
/* hci_cmd_sync completion callback for MGMT_OP_REMOVE_ADV_MONITOR: sends
 * the mgmt reply and, on success, broadcasts the removal event and refreshes
 * passive scanning.
 */
static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp;

	/* Cancelled request; cleanup is handled elsewhere */
	if (status == -ECANCELED)
		return;

	hci_dev_lock(hdev);

	cp = cmd->param;

	/* Echo the (little-endian) handle back unchanged */
	rp.monitor_handle = cp->monitor_handle;

	if (!status) {
		mgmt_adv_monitor_removed(cmd->sk, hdev, cp->monitor_handle);
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5609 
5610 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5611 {
5612 	struct mgmt_pending_cmd *cmd = data;
5613 	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5614 	u16 handle = __le16_to_cpu(cp->monitor_handle);
5615 
5616 	if (!handle)
5617 		return hci_remove_all_adv_monitor(hdev);
5618 
5619 	return hci_remove_single_adv_monitor(hdev, handle);
5620 }
5621 
/* Handler for MGMT_OP_REMOVE_ADV_MONITOR: queues the removal on the
 * hci_cmd_sync workqueue; the reply is sent from
 * mgmt_remove_adv_monitor_complete().
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err, status;

	hci_dev_lock(hdev);

	/* Reject while another monitor-related operation is in flight */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	/* mgmt_pending_new() keeps the command off the pending list */
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
				  mgmt_remove_adv_monitor_complete);

	if (err) {
		mgmt_pending_free(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
5666 
/* hci_cmd_sync completion callback for MGMT_OP_READ_LOCAL_OOB_DATA: parses
 * the controller reply skb (legacy or extended format depending on BR/EDR
 * Secure Connections) and sends the mgmt reply.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data,
					 int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Even on err == 0 the skb itself may carry a failure */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			/* First byte of the event is the HCI status */
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		/* Legacy reply: P-192 hash/rand only */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Truncate the reply: no P-256 values to report */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended reply: both P-192 and P-256 values */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	/* skb may be NULL or an ERR_PTR; only free a real buffer */
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5735 
5736 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5737 {
5738 	struct mgmt_pending_cmd *cmd = data;
5739 
5740 	if (bredr_sc_enabled(hdev))
5741 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5742 	else
5743 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5744 
5745 	if (IS_ERR(cmd->skb))
5746 		return PTR_ERR(cmd->skb);
5747 	else
5748 		return 0;
5749 }
5750 
/* Handler for MGMT_OP_READ_LOCAL_OOB_DATA: requires a powered, SSP-capable
 * controller; the actual read is queued on the hci_cmd_sync workqueue.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* mgmt_pending_new() keeps the command off the pending list */
	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		/* cmd is NULL when the allocation itself failed */
		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5792 
/* Handler for MGMT_OP_ADD_REMOTE_OOB_DATA.  Accepts two command sizes:
 * the short form (P-192 only, BR/EDR addresses) and the extended form
 * (P-192 + P-256, BR/EDR or LE).  Zero-valued keys disable the respective
 * OOB data set.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Short form: P-192 hash/rand only, BR/EDR addresses only */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended form: P-192 and P-256 hash/rand pairs */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		/* Neither the short nor the extended size matches */
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5900 
5901 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5902 				  void *data, u16 len)
5903 {
5904 	struct mgmt_cp_remove_remote_oob_data *cp = data;
5905 	u8 status;
5906 	int err;
5907 
5908 	bt_dev_dbg(hdev, "sock %p", sk);
5909 
5910 	if (cp->addr.type != BDADDR_BREDR)
5911 		return mgmt_cmd_complete(sk, hdev->id,
5912 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5913 					 MGMT_STATUS_INVALID_PARAMS,
5914 					 &cp->addr, sizeof(cp->addr));
5915 
5916 	hci_dev_lock(hdev);
5917 
5918 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5919 		hci_remote_oob_data_clear(hdev);
5920 		status = MGMT_STATUS_SUCCESS;
5921 		goto done;
5922 	}
5923 
5924 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5925 	if (err < 0)
5926 		status = MGMT_STATUS_INVALID_PARAMS;
5927 	else
5928 		status = MGMT_STATUS_SUCCESS;
5929 
5930 done:
5931 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5932 				status, &cp->addr, sizeof(cp->addr));
5933 
5934 	hci_dev_unlock(hdev);
5935 	return err;
5936 }
5937 
5938 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5939 				    uint8_t *mgmt_status)
5940 {
5941 	switch (type) {
5942 	case DISCOV_TYPE_LE:
5943 		*mgmt_status = mgmt_le_support(hdev);
5944 		if (*mgmt_status)
5945 			return false;
5946 		break;
5947 	case DISCOV_TYPE_INTERLEAVED:
5948 		*mgmt_status = mgmt_le_support(hdev);
5949 		if (*mgmt_status)
5950 			return false;
5951 		fallthrough;
5952 	case DISCOV_TYPE_BREDR:
5953 		*mgmt_status = mgmt_bredr_support(hdev);
5954 		if (*mgmt_status)
5955 			return false;
5956 		break;
5957 	default:
5958 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5959 		return false;
5960 	}
5961 
5962 	return true;
5963 }
5964 
/* hci_cmd_sync completion callback for the Start Discovery family: sends the
 * mgmt reply and transitions the discovery state machine.
 */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Skip if the command was cancelled or already removed */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	/* Reply carries only the discovery type (first byte of the params) */
	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_free(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
				DISCOVERY_FINDING);
}
5981 
5982 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5983 {
5984 	if (!mgmt_pending_listed(hdev, data))
5985 		return -ECANCELED;
5986 
5987 	return hci_start_discovery_sync(hdev);
5988 }
5989 
/* Common implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY: validates state and discovery type, then
 * queues the discovery start on the hci_cmd_sync workqueue.
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	/* Limited discovery only reports devices in limited-discoverable mode */
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
6060 
/* Handler for MGMT_OP_START_DISCOVERY (regular, non-limited discovery). */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
6067 
/* Handler for MGMT_OP_START_LIMITED_DISCOVERY (limited-discoverable only). */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
6075 
/* Handler for MGMT_OP_START_SERVICE_DISCOVERY: like start_discovery but with
 * an RSSI threshold and an optional list of service UUIDs used to filter
 * reported results.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Largest uuid_count that cannot overflow the u16 expected_len below */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The command must carry exactly uuid_count 16-byte UUIDs */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
6187 
/* hci_cmd_sync completion callback for MGMT_OP_STOP_DISCOVERY: sends the
 * mgmt reply and, on success, marks discovery stopped.
 */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Skip if the command was cancelled or already removed */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* Reply carries only the discovery type (first byte of the params) */
	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_free(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
6204 
6205 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6206 {
6207 	if (!mgmt_pending_listed(hdev, data))
6208 		return -ECANCELED;
6209 
6210 	return hci_stop_discovery_sync(hdev);
6211 }
6212 
/* Handler for MGMT_OP_STOP_DISCOVERY: validates that a discovery of the
 * requested type is active, then queues the stop on the hci_cmd_sync
 * workqueue.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the running discovery */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6257 
/* Handler for MGMT_OP_CONFIRM_NAME: userspace tells us whether the name of a
 * device found during discovery is already known, so the kernel can decide
 * whether a remote name request is still needed.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	/* The device must be in the cache with an unknown name state */
	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		/* Name already known: no resolve needed, drop from list */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		/* Queue the entry for name resolution */
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
6299 
6300 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6301 			u16 len)
6302 {
6303 	struct mgmt_cp_block_device *cp = data;
6304 	u8 status;
6305 	int err;
6306 
6307 	bt_dev_dbg(hdev, "sock %p", sk);
6308 
6309 	if (!bdaddr_type_is_valid(cp->addr.type))
6310 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6311 					 MGMT_STATUS_INVALID_PARAMS,
6312 					 &cp->addr, sizeof(cp->addr));
6313 
6314 	hci_dev_lock(hdev);
6315 
6316 	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6317 				  cp->addr.type);
6318 	if (err < 0) {
6319 		status = MGMT_STATUS_FAILED;
6320 		goto done;
6321 	}
6322 
6323 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6324 		   sk);
6325 	status = MGMT_STATUS_SUCCESS;
6326 
6327 done:
6328 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6329 				&cp->addr, sizeof(cp->addr));
6330 
6331 	hci_dev_unlock(hdev);
6332 
6333 	return err;
6334 }
6335 
6336 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6337 			  u16 len)
6338 {
6339 	struct mgmt_cp_unblock_device *cp = data;
6340 	u8 status;
6341 	int err;
6342 
6343 	bt_dev_dbg(hdev, "sock %p", sk);
6344 
6345 	if (!bdaddr_type_is_valid(cp->addr.type))
6346 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6347 					 MGMT_STATUS_INVALID_PARAMS,
6348 					 &cp->addr, sizeof(cp->addr));
6349 
6350 	hci_dev_lock(hdev);
6351 
6352 	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6353 				  cp->addr.type);
6354 	if (err < 0) {
6355 		status = MGMT_STATUS_INVALID_PARAMS;
6356 		goto done;
6357 	}
6358 
6359 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6360 		   sk);
6361 	status = MGMT_STATUS_SUCCESS;
6362 
6363 done:
6364 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6365 				&cp->addr, sizeof(cp->addr));
6366 
6367 	hci_dev_unlock(hdev);
6368 
6369 	return err;
6370 }
6371 
/* hci_cmd_sync work: refresh the EIR data so it reflects the new Device ID */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
6376 
6377 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6378 			 u16 len)
6379 {
6380 	struct mgmt_cp_set_device_id *cp = data;
6381 	int err;
6382 	__u16 source;
6383 
6384 	bt_dev_dbg(hdev, "sock %p", sk);
6385 
6386 	source = __le16_to_cpu(cp->source);
6387 
6388 	if (source > 0x0002)
6389 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6390 				       MGMT_STATUS_INVALID_PARAMS);
6391 
6392 	hci_dev_lock(hdev);
6393 
6394 	hdev->devid_source = source;
6395 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6396 	hdev->devid_product = __le16_to_cpu(cp->product);
6397 	hdev->devid_version = __le16_to_cpu(cp->version);
6398 
6399 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6400 				NULL, 0);
6401 
6402 	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6403 
6404 	hci_dev_unlock(hdev);
6405 
6406 	return err;
6407 }
6408 
/* Log the outcome of re-enabling an advertising instance; no recovery is
 * attempted here.
 */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err) {
		bt_dev_dbg(hdev, "status %d", err);
		return;
	}

	bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
6416 
/* hci_cmd_sync completion callback for MGMT_OP_SET_ADVERTISING: syncs the
 * HCI_ADVERTISING flag with the controller state, replies to the command,
 * broadcasts new settings, and re-enables instance advertising if the
 * global advertising mode was just turned off.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	/* Skip if the command was cancelled or already removed */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, data))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, status);
		mgmt_pending_free(cmd);
		return;
	}

	/* Mirror the controller's actual LE advertising state */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	/* settings_rsp() replies and stores cmd->sk in match.sk with a ref */
	settings_rsp(cmd, &match);
	mgmt_pending_free(cmd);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance: fall back to the first configured one */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
6468 
/* hci_cmd_sync work for MGMT_OP_SET_ADVERTISING.
 *
 * Snapshots the command parameters under mgmt_pending_lock (the pending
 * command may be freed concurrently), then enables or disables
 * advertising on the controller.  Returns -ECANCELED if the command is
 * no longer pending.
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode cp;
	u8 val;

	mutex_lock(&hdev->mgmt_pending_lock);

	if (!__mgmt_pending_listed(hdev, cmd)) {
		mutex_unlock(&hdev->mgmt_pending_lock);
		return -ECANCELED;
	}

	/* Copy the parameters out so cmd is not touched after unlock */
	memcpy(&cp, cmd->param, sizeof(cp));

	mutex_unlock(&hdev->mgmt_pending_lock);

	val = !!cp.val;

	/* 0x02 requests connectable advertising */
	if (cp.val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
6515 
/* MGMT_OP_SET_ADVERTISING handler.
 *
 * cp->val: 0x00 disable, 0x01 enable, 0x02 enable connectable
 * advertising.  When no HCI traffic is needed (see the condition block
 * below), only the setting flags are toggled and the response is sent
 * directly; otherwise the change is queued via set_adv_sync().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Advertising is temporarily suspended (e.g. during an ongoing
	 * operation); reject rather than interfere.
	 */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings if a flag actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize against other in-flight commands that touch the
	 * advertising state.
	 */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	/* On queueing failure the completion will never run, so the
	 * pending entry must be removed here.
	 */
	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6600 
6601 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6602 			      void *data, u16 len)
6603 {
6604 	struct mgmt_cp_set_static_address *cp = data;
6605 	int err;
6606 
6607 	bt_dev_dbg(hdev, "sock %p", sk);
6608 
6609 	if (!lmp_le_capable(hdev))
6610 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6611 				       MGMT_STATUS_NOT_SUPPORTED);
6612 
6613 	if (hdev_is_powered(hdev))
6614 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6615 				       MGMT_STATUS_REJECTED);
6616 
6617 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6618 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6619 			return mgmt_cmd_status(sk, hdev->id,
6620 					       MGMT_OP_SET_STATIC_ADDRESS,
6621 					       MGMT_STATUS_INVALID_PARAMS);
6622 
6623 		/* Two most significant bits shall be set */
6624 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6625 			return mgmt_cmd_status(sk, hdev->id,
6626 					       MGMT_OP_SET_STATIC_ADDRESS,
6627 					       MGMT_STATUS_INVALID_PARAMS);
6628 	}
6629 
6630 	hci_dev_lock(hdev);
6631 
6632 	bacpy(&hdev->static_addr, &cp->bdaddr);
6633 
6634 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6635 	if (err < 0)
6636 		goto unlock;
6637 
6638 	err = new_settings(hdev, sk);
6639 
6640 unlock:
6641 	hci_dev_unlock(hdev);
6642 	return err;
6643 }
6644 
6645 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6646 			   void *data, u16 len)
6647 {
6648 	struct mgmt_cp_set_scan_params *cp = data;
6649 	__u16 interval, window;
6650 	int err;
6651 
6652 	bt_dev_dbg(hdev, "sock %p", sk);
6653 
6654 	if (!lmp_le_capable(hdev))
6655 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6656 				       MGMT_STATUS_NOT_SUPPORTED);
6657 
6658 	/* Keep allowed ranges in sync with set_mesh() */
6659 	interval = __le16_to_cpu(cp->interval);
6660 
6661 	if (interval < 0x0004 || interval > 0x4000)
6662 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6663 				       MGMT_STATUS_INVALID_PARAMS);
6664 
6665 	window = __le16_to_cpu(cp->window);
6666 
6667 	if (window < 0x0004 || window > 0x4000)
6668 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6669 				       MGMT_STATUS_INVALID_PARAMS);
6670 
6671 	if (window > interval)
6672 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6673 				       MGMT_STATUS_INVALID_PARAMS);
6674 
6675 	hci_dev_lock(hdev);
6676 
6677 	hdev->le_scan_interval = interval;
6678 	hdev->le_scan_window = window;
6679 
6680 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6681 				NULL, 0);
6682 
6683 	/* If background scan is running, restart it so new parameters are
6684 	 * loaded.
6685 	 */
6686 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6687 	    hdev->discovery.state == DISCOVERY_STOPPED)
6688 		hci_update_passive_scan(hdev);
6689 
6690 	hci_dev_unlock(hdev);
6691 
6692 	return err;
6693 }
6694 
6695 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6696 {
6697 	struct mgmt_pending_cmd *cmd = data;
6698 
6699 	bt_dev_dbg(hdev, "err %d", err);
6700 
6701 	if (err) {
6702 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6703 				mgmt_status(err));
6704 	} else {
6705 		struct mgmt_mode *cp = cmd->param;
6706 
6707 		if (cp->val)
6708 			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6709 		else
6710 			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6711 
6712 		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6713 		new_settings(hdev, cmd->sk);
6714 	}
6715 
6716 	mgmt_pending_free(cmd);
6717 }
6718 
6719 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6720 {
6721 	struct mgmt_pending_cmd *cmd = data;
6722 	struct mgmt_mode *cp = cmd->param;
6723 
6724 	return hci_write_fast_connectable_sync(hdev, cp->val);
6725 }
6726 
/* MGMT_OP_SET_FAST_CONNECTABLE handler.
 *
 * Requires BR/EDR enabled and at least Bluetooth 1.2.  When powered
 * off, only the flag is toggled; when powered on, the change is queued
 * to the controller via write_fast_connectable_sync().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Already in the requested state: just confirm the settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* Powered off: toggle the flag without any HCI traffic */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	/* Queueing failed: report failure and free the command since the
	 * completion callback will never run.
	 */
	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6782 
/* hci_cmd_sync completion for MGMT_OP_SET_BREDR.
 *
 * set_bredr() optimistically sets HCI_BREDR_ENABLED before queueing the
 * work, so on failure the flag must be cleared again before reporting
 * the error.  On success the settings response and New Settings event
 * are sent.
 */
static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
6805 
6806 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6807 {
6808 	int status;
6809 
6810 	status = hci_write_fast_connectable_sync(hdev, false);
6811 
6812 	if (!status)
6813 		status = hci_update_scan_sync(hdev);
6814 
6815 	/* Since only the advertising data flags will change, there
6816 	 * is no need to update the scan response data.
6817 	 */
6818 	if (!status)
6819 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6820 
6821 	return status;
6822 }
6823 
/* MGMT_OP_SET_BREDR handler.
 *
 * Enables or disables BR/EDR support on a dual-mode controller.
 * Disabling while powered on is rejected, as is re-enabling when the
 * controller was configured LE-only with a static address or with
 * secure connections enabled (see comment below).
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* BR/EDR can only be toggled while LE stays enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just confirm the current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	/* Powered off: toggle flags only, no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			/* Disabling BR/EDR invalidates all BR/EDR-only
			 * settings as well.
			 */
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6923 
/* hci_cmd_sync completion for MGMT_OP_SET_SECURE_CONN.
 *
 * On success the HCI_SC_ENABLED/HCI_SC_ONLY flags are updated to match
 * the requested mode (0x00 off, 0x01 SC enabled, 0x02 SC-only) and a
 * settings response plus New Settings event is sent.
 */
static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
}
6961 
6962 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6963 {
6964 	struct mgmt_pending_cmd *cmd = data;
6965 	struct mgmt_mode *cp = cmd->param;
6966 	u8 val = !!cp->val;
6967 
6968 	/* Force write of val */
6969 	hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6970 
6971 	return hci_write_sc_support_sync(hdev, val);
6972 }
6973 
/* MGMT_OP_SET_SECURE_CONN handler.
 *
 * cp->val: 0x00 disable, 0x01 enable secure connections, 0x02 enable
 * SC-only mode.  When the controller is powered off, not SC capable or
 * BR/EDR is disabled, only the flags are toggled; otherwise the change
 * is written to the controller via set_secure_conn_sync().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On a BR/EDR capable controller SC requires SSP to be enabled */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Flag-only path: no controller write is needed or possible */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* Already in the requested state: confirm without HCI traffic */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	/* Queueing failed: the completion will never run, free here */
	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
7054 
/* MGMT_OP_SET_DEBUG_KEYS handler.
 *
 * cp->val: 0x00 discard debug keys, 0x01 keep them, 0x02 keep them and
 * also use SSP debug mode.  HCI_KEEP_DEBUG_KEYS tracks 0x01/0x02 while
 * HCI_USE_DEBUG_KEYS is set only for 0x02.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Only push SSP debug mode to the controller when powered, the
	 * mode actually changed and SSP is enabled.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	/* Broadcast New Settings only when the visible setting changed */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7101 
/* MGMT_OP_SET_PRIVACY handler.
 *
 * cp->privacy: 0x00 disable, 0x01 enable, 0x02 enable limited privacy.
 * Installs or clears the local IRK and the related privacy flags.  Only
 * allowed while the controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Mark the RPA as expired so a fresh one is generated */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		/* Wipe the IRK when privacy is turned off */
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7158 
7159 static bool irk_is_valid(struct mgmt_irk_info *irk)
7160 {
7161 	switch (irk->addr.type) {
7162 	case BDADDR_LE_PUBLIC:
7163 		return true;
7164 
7165 	case BDADDR_LE_RANDOM:
7166 		/* Two most significant bits shall be set */
7167 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7168 			return false;
7169 		return true;
7170 	}
7171 
7172 	return false;
7173 }
7174 
/* MGMT_OP_LOAD_IRKS handler.
 *
 * Replaces the entire IRK store with the list supplied by user space.
 * The whole payload is validated first; the existing store is cleared
 * only after every entry passed irk_is_valid().  Entries matching a
 * blocked key are skipped with a warning rather than failing the load.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound on irk_count so struct_size() below cannot wrap */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared count */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate everything up front so the store is never replaced
	 * with a partially-valid list.
	 */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7245 
7246 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7247 {
7248 	if (key->initiator != 0x00 && key->initiator != 0x01)
7249 		return false;
7250 
7251 	switch (key->addr.type) {
7252 	case BDADDR_LE_PUBLIC:
7253 		return true;
7254 
7255 	case BDADDR_LE_RANDOM:
7256 		/* Two most significant bits shall be set */
7257 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7258 			return false;
7259 		return true;
7260 	}
7261 
7262 	return false;
7263 }
7264 
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler.
 *
 * Replaces the LTK store with the list supplied by user space.  Unlike
 * load_irks(), invalid or blocked entries are skipped with a warning
 * instead of failing the whole command.  P-256 debug keys are never
 * loaded (see the switch below).
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound on key_count so struct_size() below cannot wrap */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared count */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		if (!ltk_is_valid(key)) {
			bt_dev_warn(hdev, "Invalid LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Map the mgmt key type onto the SMP key type and
		 * authentication level.
		 */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Debug keys are deliberately not stored: fall
			 * through to default and skip this entry.
			 */
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7357 
/* hci_cmd_sync completion for MGMT_OP_GET_CONN_INFO.
 *
 * On success the reply carries the RSSI and TX power values that
 * get_conn_info_sync() refreshed on the connection; on failure the
 * fields are filled with their "invalid" sentinels.
 */
static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	/* conn was stashed in user_data by get_conn_info_sync() */
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct mgmt_rp_get_conn_info rp;
	u8 status;

	bt_dev_dbg(hdev, "err %d", err);

	/* Echo the requested address back in the reply */
	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	status = mgmt_status(err);
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_GET_CONN_INFO, status,
			  &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}
7386 
/* hci_cmd_sync work for MGMT_OP_GET_CONN_INFO.
 *
 * Refreshes RSSI and (when needed) TX power for the requested
 * connection.  Note the mixed return convention: a positive
 * MGMT_STATUS_NOT_CONNECTED when the link vanished, otherwise the
 * (zero/negative) result of the HCI sync calls; both are mapped via
 * mgmt_status() in get_conn_info_complete().
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16   handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Stash the connection for get_conn_info_complete() */
	cmd->user_data = conn;
	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
7424 
/* MGMT_OP_GET_CONN_INFO handler.
 *
 * Returns RSSI and TX power for a connection.  Cached values are used
 * while they are fresh; otherwise a refresh is queued via
 * get_conn_info_sync() and the reply is sent from its completion.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-fill the reply with the requested address; error replies
	 * below reuse it.
	 */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
						 hdev->conn_info_max_age - 1);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		/* Queueing failed: reply with failure and free the
		 * command since the completion will never run.
		 */
		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7515 
/* hci_cmd_sync completion for MGMT_OP_GET_CLOCK_INFO: build the reply
 * from the clock values populated by get_clock_info_sync() and release
 * the pending command.
 */
static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data; /* set by get_clock_info_sync */
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Echo the queried address back in the reply */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* On failure reply with the zeroed clock values */
	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	/* Piconet clock/accuracy are only valid when a specific connection
	 * was queried.
	 */
	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}
7546 
/* hci_cmd_sync work for MGMT_OP_GET_CLOCK_INFO: read the local clock
 * first, then the piconet clock of the target connection if it is still
 * established.
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn;

	/* which = 0x00 (local clock); result is ignored here and picked up
	 * from hdev->clock in the completion handler.
	 */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_read_clock_sync(hdev, &hci_cp);

	/* Make sure connection still exists */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;
	/* NOTE(review): a BDADDR_ANY request (local clock only, conn == NULL
	 * in get_clock_info) also takes the not-connected path above since
	 * the lookup cannot match — confirm this is the intended behavior.
	 */

	/* Stash the connection for get_clock_info_complete() */
	cmd->user_data = conn;
	hci_cp.handle = cpu_to_le16(conn->handle);
	hci_cp.which = 0x01; /* Piconet clock */

	return hci_read_clock_sync(hdev, &hci_cp);
}
7568 
/* Handler for MGMT_OP_GET_CLOCK_INFO: read local and (optionally)
 * piconet clock values.  BR/EDR only; the actual HCI reads happen in
 * get_clock_info_sync() and the reply is sent from
 * get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
								u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Echo the queried address back in every reply, including errors */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-ANY address must refer to an established ACL connection;
	 * BDADDR_ANY requests only the local clock.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		/* Reply sent from get_clock_info_complete(), which frees cmd */
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		/* cmd was never queued, so free it here */
		if (cmd)
			mgmt_pending_free(cmd);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
7632 
7633 static void device_added(struct sock *sk, struct hci_dev *hdev,
7634 			 bdaddr_t *bdaddr, u8 type, u8 action)
7635 {
7636 	struct mgmt_ev_device_added ev;
7637 
7638 	bacpy(&ev.addr.bdaddr, bdaddr);
7639 	ev.addr.type = type;
7640 	ev.action = action;
7641 
7642 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7643 }
7644 
/* hci_cmd_sync completion for MGMT_OP_ADD_DEVICE: on success emit
 * Device Added and Device Flags Changed events, then complete the mgmt
 * command and free the pending entry.
 */
static void add_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_device *cp = cmd->param;

	if (!err) {
		struct hci_conn_params *params;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));

		device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
			     cp->action);
		/* NULL sk: presumably no socket is skipped for this event */
		device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
				     cp->addr.type, hdev->conn_flags,
				     params ? params->flags : 0);
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
			  mgmt_status(err), &cp->addr, sizeof(cp->addr));
	mgmt_pending_free(cmd);
}
7667 
/* hci_cmd_sync work for Add Device: re-evaluate passive scanning so the
 * newly added LE conn_params entry takes effect.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7672 
/* Handler for MGMT_OP_ADD_DEVICE: add a device to the BR/EDR accept
 * list (action 0x01 only) or create LE connection parameters with the
 * auto-connect policy derived from @cp->action.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* 0x00 = background scan, 0x01 = allow incoming/direct,
	 * 0x02 = auto-connect
	 */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_update_scan(hdev);

		/* BR/EDR completes synchronously; events sent below */
		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the kernel auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	params = hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				     auto_conn);
	if (!params) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* LE completes asynchronously from add_device_complete() */
	err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
				 add_device_complete);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		mgmt_pending_free(cmd);
	}

	goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7784 
7785 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7786 			   bdaddr_t *bdaddr, u8 type)
7787 {
7788 	struct mgmt_ev_device_removed ev;
7789 
7790 	bacpy(&ev.addr.bdaddr, bdaddr);
7791 	ev.addr.type = type;
7792 
7793 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7794 }
7795 
/* hci_cmd_sync work for Remove Device: re-evaluate passive scanning now
 * that one or more LE conn_params entries are gone.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7800 
/* Handler for MGMT_OP_REMOVE_DEVICE: remove a single device from the
 * BR/EDR accept list or LE conn_params, or — when the address is
 * BDADDR_ANY with type 0 — wipe the accept list and all removable LE
 * connection parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Single-device removal */
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			/* BR/EDR path needs no passive-scan update */
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries not created via Add Device cannot be removed here */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_free(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY: remove everything removable */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY is only valid with address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep explicit-connect entries, just demote them */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			hci_conn_params_free(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7924 
7925 static int conn_update_sync(struct hci_dev *hdev, void *data)
7926 {
7927 	struct hci_conn_params *params = data;
7928 	struct hci_conn *conn;
7929 
7930 	conn = hci_conn_hash_lookup_le(hdev, &params->addr, params->addr_type);
7931 	if (!conn)
7932 		return -ECANCELED;
7933 
7934 	return hci_le_conn_update_sync(hdev, conn, params);
7935 }
7936 
/* Handler for MGMT_OP_LOAD_CONN_PARAM: bulk-load LE connection
 * parameters.  Invalid entries are skipped (logged, not rejected).  A
 * single-entry load for an already-known device may trigger an LE
 * connection update on the live link.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound so struct_size() below cannot overflow u16 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* Declared count must match the actual payload size exactly */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	/* Multi-entry loads replace the disabled entries wholesale; the
	 * single-entry case is handled inside the loop below.
	 */
	if (param_count > 1)
		hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		bool update = false;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		/* Detect when the loading is for an existing parameter then
		 * attempt to trigger the connection update procedure.
		 */
		if (!i && param_count == 1) {
			hci_param = hci_conn_params_lookup(hdev,
							   &param->addr.bdaddr,
							   addr_type);
			if (hci_param)
				update = true;
			else
				hci_conn_params_clear_disabled(hdev);
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;

		/* Check if we need to trigger a connection update */
		if (update) {
			struct hci_conn *conn;

			/* Lookup for existing connection as central and check
			 * if parameters match and if they don't then trigger
			 * a connection update.
			 */
			conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
						       addr_type);
			if (conn && conn->role == HCI_ROLE_MASTER &&
			    (conn->le_conn_min_interval != min ||
			     conn->le_conn_max_interval != max ||
			     conn->le_conn_latency != latency ||
			     conn->le_supv_timeout != timeout))
				hci_cmd_sync_queue(hdev, conn_update_sync,
						   hci_param, NULL);
		}
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
8055 
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG: mark whether the controller
 * is configured by external means.  Only valid while powered off and on
 * controllers with the EXTERNAL_CONFIG quirk.  Toggling may move the
 * controller between the configured and unconfigured index lists.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* changed is true only when the flag actually flipped */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the configured state changed, move the index between the
	 * unconfigured and configured lists.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			/* Became configured: bring it up via power_on work */
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			/* Became unconfigured: expose as raw/unconfigured */
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8111 
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS: store a public address to be
 * programmed via the driver's set_bdaddr callback on the next power on.
 * Only valid while powered off; may complete an unconfigured
 * controller's setup.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Requires driver support for programming the address */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* If the address completed configuration, move the controller to
	 * the configured list and power it on.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8163 
/* Completion for the BR/EDR branch of Read Local OOB Extended Data:
 * parse the HCI Read Local OOB (Extended) Data reply carried in
 * cmd->skb, package hash/randomizer values as EIR fields, reply to the
 * caller and broadcast a Local OOB Data Updated event.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Derive the status from the skb when the request itself passed */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		/* NOTE(review): status already went through mgmt_status()
		 * above, so this second mapping looks redundant — confirm.
		 */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy SSP: only the P-192 hash/randomizer pair */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class-of-dev + C192 + R192 EIR fields */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Secure Connections: P-256 pair, plus P-192 unless SC-only */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Subscribe the requester, then notify other subscribed sockets */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_free(cmd);
}
8283 
8284 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8285 				  struct mgmt_cp_read_local_oob_ext_data *cp)
8286 {
8287 	struct mgmt_pending_cmd *cmd;
8288 	int err;
8289 
8290 	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8291 			       cp, sizeof(*cp));
8292 	if (!cmd)
8293 		return -ENOMEM;
8294 
8295 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8296 				 read_local_oob_ext_data_complete);
8297 
8298 	if (err < 0) {
8299 		mgmt_pending_remove(cmd);
8300 		return err;
8301 	}
8302 
8303 	return 0;
8304 }
8305 
/* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA: return local OOB pairing
 * data as EIR-encoded fields.  The BR/EDR case defers to an HCI request
 * (read_local_ssp_oob_req); the LE case is built synchronously from
 * local state and SMP-generated confirm/random values.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-compute the worst-case EIR length for the reply allocation */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Now fill in the actual EIR data */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Asynchronous: reply comes from the HCI completion */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* addr[6] is the address type: 0x01 static/random, 0x00
		 * public — same selection logic as advertising.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* LE Role: 0x02 peripheral-preferred when advertising,
		 * 0x01 central-preferred otherwise.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* Subscribe the requester to future OOB data updates */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Notify other subscribed sockets of the refreshed OOB data */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
8466 
8467 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8468 {
8469 	u32 flags = 0;
8470 
8471 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
8472 	flags |= MGMT_ADV_FLAG_DISCOV;
8473 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8474 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8475 	flags |= MGMT_ADV_FLAG_APPEARANCE;
8476 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8477 	flags |= MGMT_ADV_PARAM_DURATION;
8478 	flags |= MGMT_ADV_PARAM_TIMEOUT;
8479 	flags |= MGMT_ADV_PARAM_INTERVALS;
8480 	flags |= MGMT_ADV_PARAM_TX_POWER;
8481 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
8482 
8483 	/* In extended adv TX_POWER returned from Set Adv Param
8484 	 * will be always valid.
8485 	 */
8486 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8487 		flags |= MGMT_ADV_FLAG_TX_POWER;
8488 
8489 	if (ext_adv_capable(hdev)) {
8490 		flags |= MGMT_ADV_FLAG_SEC_1M;
8491 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8492 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8493 
8494 		if (le_2m_capable(hdev))
8495 			flags |= MGMT_ADV_FLAG_SEC_2M;
8496 
8497 		if (le_coded_capable(hdev))
8498 			flags |= MGMT_ADV_FLAG_SEC_CODED;
8499 	}
8500 
8501 	return flags;
8502 }
8503 
/* Handler for MGMT_OP_READ_ADV_FEATURES: report supported advertising
 * flags, data-size limits and the list of active advertising instance
 * identifiers.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One byte of trailing instance-id array per known instance */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = max_adv_len(hdev);
	rp->max_scan_rsp_len = max_adv_len(hdev);
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible */
		/* NOTE(review): the comment above mentions le_num_of_adv_sets
		 * but the check below compares against adv_instance_cnt —
		 * confirm which bound is intended.
		 */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			/* Hidden instance: shrink the reply accordingly */
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
8558 
8559 static u8 calculate_name_len(struct hci_dev *hdev)
8560 {
8561 	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8562 
8563 	return eir_append_local_name(hdev, buf, 0);
8564 }
8565 
8566 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8567 			   bool is_adv_data)
8568 {
8569 	u8 max_len = max_adv_len(hdev);
8570 
8571 	if (is_adv_data) {
8572 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8573 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
8574 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
8575 			max_len -= 3;
8576 
8577 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8578 			max_len -= 3;
8579 	} else {
8580 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8581 			max_len -= calculate_name_len(hdev);
8582 
8583 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8584 			max_len -= 4;
8585 	}
8586 
8587 	return max_len;
8588 }
8589 
8590 static bool flags_managed(u32 adv_flags)
8591 {
8592 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8593 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
8594 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
8595 }
8596 
8597 static bool tx_power_managed(u32 adv_flags)
8598 {
8599 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8600 }
8601 
8602 static bool name_managed(u32 adv_flags)
8603 {
8604 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8605 }
8606 
8607 static bool appearance_managed(u32 adv_flags)
8608 {
8609 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8610 }
8611 
/* Validate user-supplied advertising or scan response TLV data.
 *
 * Rejects data that exceeds the space left by kernel-managed fields, or
 * that contains a field (flags, TX power, name, appearance) which the
 * kernel will generate itself for the given adv flags, or that is not a
 * well-formed sequence of length-prefixed EIR fields.
 */
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		/* A zero-length field just advances past the length byte */
		if (!cur_len)
			continue;

		/* NOTE(review): data[i + 1] is read before the bounds check
		 * at the bottom of the loop; for a non-empty field starting
		 * at the last byte this reads one byte past @len — confirm
		 * callers always pass a buffer where that byte is mapped.
		 */
		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;

		/* If the current field length would exceed the total data
		 * length, then it's invalid.
		 */
		if (i + cur_len >= len)
			return false;
	}

	return true;
}
8656 
8657 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8658 {
8659 	u32 supported_flags, phy_flags;
8660 
8661 	/* The current implementation only supports a subset of the specified
8662 	 * flags. Also need to check mutual exclusiveness of sec flags.
8663 	 */
8664 	supported_flags = get_supported_adv_flags(hdev);
8665 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8666 	if (adv_flags & ~supported_flags ||
8667 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8668 		return false;
8669 
8670 	return true;
8671 }
8672 
8673 static bool adv_busy(struct hci_dev *hdev)
8674 {
8675 	return pending_find(MGMT_OP_SET_LE, hdev);
8676 }
8677 
/* Finalize instances added by an Add Advertising style request.
 *
 * On success every still-pending instance becomes permanent. On failure
 * the pending instances are rolled back: removed from the controller
 * state and reported to userspace as removed.
 */
static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
			     int err)
{
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	/* _safe variant: entries may be removed while iterating */
	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance;

		if (!adv->pending)
			continue;

		if (!err) {
			/* Commit: instance is now fully registered */
			adv->pending = false;
			continue;
		}

		instance = adv->instance;

		/* Stop the rotation timer if it was driving this instance */
		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(sk, hdev, instance);
	}

	hci_dev_unlock(hdev);
}
8709 
8710 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8711 {
8712 	struct mgmt_pending_cmd *cmd = data;
8713 	struct mgmt_cp_add_advertising *cp = cmd->param;
8714 	struct mgmt_rp_add_advertising rp;
8715 
8716 	memset(&rp, 0, sizeof(rp));
8717 
8718 	rp.instance = cp->instance;
8719 
8720 	if (err)
8721 		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
8722 				mgmt_status(err));
8723 	else
8724 		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
8725 				  mgmt_status(err), &rp, sizeof(rp));
8726 
8727 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8728 
8729 	mgmt_pending_free(cmd);
8730 }
8731 
8732 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8733 {
8734 	struct mgmt_pending_cmd *cmd = data;
8735 	struct mgmt_cp_add_advertising *cp = cmd->param;
8736 
8737 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8738 }
8739 
/* Handle MGMT_OP_ADD_ADVERTISING.
 *
 * Validates the request, registers (or replaces) the advertising
 * instance, and, when the controller is powered and not under the
 * HCI_ADVERTISING setting, queues the HCI work to start advertising.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* LE must be supported and enabled */
	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Valid instance identifiers are 1..le_num_of_adv_sets */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Payload must exactly hold adv data followed by scan rsp data */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a running clock, i.e. a powered controller */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Adv data and scan rsp data are laid out back to back in cp->data */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Remember the count so we can tell an add from a replace below */
	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* NOTE(review): mgmt_pending_new() above copies @data into
	 * cmd->param, while this assignment writes to the original buffer —
	 * confirm the intended copy is the one add_advertising_sync() reads
	 * when schedule_instance differs from the requested instance.
	 */
	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8874 
/* Completion callback for MGMT_OP_ADD_EXT_ADV_PARAMS.
 *
 * On success, reports the selected TX power plus the remaining space for
 * adv/scan-rsp data. On failure, removes the instance again (notifying
 * userspace if it had previously been advertising).
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Instance may already be gone; nothing to report then */
	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
8924 
8925 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8926 {
8927 	struct mgmt_pending_cmd *cmd = data;
8928 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8929 
8930 	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8931 }
8932 
/* Handle MGMT_OP_ADD_EXT_ADV_PARAMS.
 *
 * First half of the two-step extended advertising setup: registers an
 * instance with parameters only (no data yet). With extended advertising
 * support the parameters are pushed to the controller asynchronously;
 * otherwise the defaults are reported back immediately.
 */
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	/* LE must be supported and enabled */
	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	/* Valid instance identifiers are 1..le_num_of_adv_sets */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			/* Roll back the instance we just created */
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		/* Legacy advertising: no HCI round trip is needed, answer
		 * with defaults right away.
		 */
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9048 
/* Completion callback for MGMT_OP_ADD_EXT_ADV_DATA.
 *
 * Commits or rolls back the instance via add_adv_complete() and then
 * reports the result to the issuing socket.
 */
static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
	struct mgmt_rp_add_advertising rp;

	add_adv_complete(hdev, cmd->sk, cp->instance, err);

	memset(&rp, 0, sizeof(rp));

	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
				mgmt_status(err));
	else
		/* err == 0 here, so mgmt_status(err) is SUCCESS */
		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}
9070 
9071 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
9072 {
9073 	struct mgmt_pending_cmd *cmd = data;
9074 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
9075 	int err;
9076 
9077 	if (ext_adv_capable(hdev)) {
9078 		err = hci_update_adv_data_sync(hdev, cp->instance);
9079 		if (err)
9080 			return err;
9081 
9082 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
9083 		if (err)
9084 			return err;
9085 
9086 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
9087 	}
9088 
9089 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
9090 }
9091 
/* Handle MGMT_OP_ADD_EXT_ADV_DATA.
 *
 * Second half of the two-step extended advertising setup: attaches adv
 * and scan response data to an instance previously registered via
 * MGMT_OP_ADD_EXT_ADV_PARAMS and schedules it. On any failure the
 * half-configured instance is removed again (clear_new_instance).
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The params step must have registered this instance already */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		/* Commit the instance now and tell userspace about it */
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9210 
9211 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9212 					int err)
9213 {
9214 	struct mgmt_pending_cmd *cmd = data;
9215 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9216 	struct mgmt_rp_remove_advertising rp;
9217 
9218 	bt_dev_dbg(hdev, "err %d", err);
9219 
9220 	memset(&rp, 0, sizeof(rp));
9221 	rp.instance = cp->instance;
9222 
9223 	if (err)
9224 		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
9225 				mgmt_status(err));
9226 	else
9227 		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
9228 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9229 
9230 	mgmt_pending_free(cmd);
9231 }
9232 
9233 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9234 {
9235 	struct mgmt_pending_cmd *cmd = data;
9236 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9237 	int err;
9238 
9239 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9240 	if (err)
9241 		return err;
9242 
9243 	if (list_empty(&hdev->adv_instances))
9244 		err = hci_disable_advertising_sync(hdev);
9245 
9246 	return err;
9247 }
9248 
/* Handle MGMT_OP_REMOVE_ADVERTISING.
 *
 * Instance 0 means "remove all instances"; any other value must name an
 * existing instance. The actual removal runs asynchronously via
 * remove_advertising_sync().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Non-zero instance must exist (zero addresses all instances) */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing registered at all, so nothing to remove */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9296 
9297 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9298 			     void *data, u16 data_len)
9299 {
9300 	struct mgmt_cp_get_adv_size_info *cp = data;
9301 	struct mgmt_rp_get_adv_size_info rp;
9302 	u32 flags, supported_flags;
9303 
9304 	bt_dev_dbg(hdev, "sock %p", sk);
9305 
9306 	if (!lmp_le_capable(hdev))
9307 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9308 				       MGMT_STATUS_REJECTED);
9309 
9310 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9311 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9312 				       MGMT_STATUS_INVALID_PARAMS);
9313 
9314 	flags = __le32_to_cpu(cp->flags);
9315 
9316 	/* The current implementation only supports a subset of the specified
9317 	 * flags.
9318 	 */
9319 	supported_flags = get_supported_adv_flags(hdev);
9320 	if (flags & ~supported_flags)
9321 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9322 				       MGMT_STATUS_INVALID_PARAMS);
9323 
9324 	rp.instance = cp->instance;
9325 	rp.flags = cp->flags;
9326 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9327 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9328 
9329 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9330 				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9331 }
9332 
/* Dispatch table for mgmt commands, indexed by opcode.
 *
 * Each entry provides the handler and the expected parameter size, plus
 * optional dispatch flags (HCI_MGMT_VAR_LEN for variable-length
 * parameters, HCI_MGMT_NO_HDEV / HCI_MGMT_UNTRUSTED /
 * HCI_MGMT_UNCONFIGURED / HCI_MGMT_HDEV_OPTIONAL as declared by
 * hci_sock). The entry position must match the MGMT_OP_* value.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
	{ mgmt_hci_cmd_sync,       MGMT_HCI_CMD_SYNC_SIZE, HCI_MGMT_VAR_LEN },
};
9467 
9468 void mgmt_index_added(struct hci_dev *hdev)
9469 {
9470 	struct mgmt_ev_ext_index ev;
9471 
9472 	if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
9473 		return;
9474 
9475 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9476 		mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
9477 				 HCI_MGMT_UNCONF_INDEX_EVENTS);
9478 		ev.type = 0x01;
9479 	} else {
9480 		mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9481 				 HCI_MGMT_INDEX_EVENTS);
9482 		ev.type = 0x00;
9483 	}
9484 
9485 	ev.bus = hdev->bus;
9486 
9487 	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9488 			 HCI_MGMT_EXT_INDEX_EVENTS);
9489 }
9490 
9491 void mgmt_index_removed(struct hci_dev *hdev)
9492 {
9493 	struct mgmt_ev_ext_index ev;
9494 	struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };
9495 
9496 	if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
9497 		return;
9498 
9499 	mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);
9500 
9501 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9502 		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
9503 				 HCI_MGMT_UNCONF_INDEX_EVENTS);
9504 		ev.type = 0x01;
9505 	} else {
9506 		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9507 				 HCI_MGMT_INDEX_EVENTS);
9508 		ev.type = 0x00;
9509 	}
9510 
9511 	ev.bus = hdev->bus;
9512 
9513 	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9514 			 HCI_MGMT_EXT_INDEX_EVENTS);
9515 
9516 	/* Cancel any remaining timed work */
9517 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
9518 		return;
9519 	cancel_delayed_work_sync(&hdev->discov_off);
9520 	cancel_delayed_work_sync(&hdev->service_cache);
9521 	cancel_delayed_work_sync(&hdev->rpa_expired);
9522 	cancel_delayed_work_sync(&hdev->mesh_send_done);
9523 }
9524 
9525 void mgmt_power_on(struct hci_dev *hdev, int err)
9526 {
9527 	struct cmd_lookup match = { NULL, hdev };
9528 
9529 	bt_dev_dbg(hdev, "err %d", err);
9530 
9531 	hci_dev_lock(hdev);
9532 
9533 	if (!err) {
9534 		restart_le_actions(hdev);
9535 		hci_update_passive_scan(hdev);
9536 	}
9537 
9538 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
9539 			     &match);
9540 
9541 	new_settings(hdev, match.sk);
9542 
9543 	if (match.sk)
9544 		sock_put(match.sk);
9545 
9546 	hci_dev_unlock(hdev);
9547 }
9548 
/* Complete pending management commands and notify userspace after the
 * controller has powered down. Pending Set Powered commands receive a
 * settings response; all other pending commands are failed with a
 * status depending on whether the device is being unregistered.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 zero_cod[] = { 0, 0, 0 };

	/* Answer pending Set Powered commands first; match.sk records the
	 * requester so it can be skipped for the broadcasts below.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
			     &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
	else
		match.mgmt_status = MGMT_STATUS_NOT_POWERED;

	/* Fail all remaining pending commands with the status chosen above */
	mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);

	/* Only announce a zeroed Class of Device if it was non-zero before */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9583 
9584 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9585 {
9586 	struct mgmt_pending_cmd *cmd;
9587 	u8 status;
9588 
9589 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9590 	if (!cmd)
9591 		return;
9592 
9593 	if (err == -ERFKILL)
9594 		status = MGMT_STATUS_RFKILLED;
9595 	else
9596 		status = MGMT_STATUS_FAILED;
9597 
9598 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9599 
9600 	mgmt_pending_remove(cmd);
9601 }
9602 
/* Emit a New Link Key event for a BR/EDR link key.
 *
 * @persistent: forwarded as the store hint telling userspace whether
 *              the key should be stored persistently.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	/* Zero the whole event so no stale stack bytes reach userspace */
	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
9619 
9620 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9621 {
9622 	switch (ltk->type) {
9623 	case SMP_LTK:
9624 	case SMP_LTK_RESPONDER:
9625 		if (ltk->authenticated)
9626 			return MGMT_LTK_AUTHENTICATED;
9627 		return MGMT_LTK_UNAUTHENTICATED;
9628 	case SMP_LTK_P256:
9629 		if (ltk->authenticated)
9630 			return MGMT_LTK_P256_AUTH;
9631 		return MGMT_LTK_P256_UNAUTH;
9632 	case SMP_LTK_P256_DEBUG:
9633 		return MGMT_LTK_P256_DEBUG;
9634 	}
9635 
9636 	return MGMT_LTK_UNAUTHENTICATED;
9637 }
9638 
/* Emit a New Long Term Key event. The store hint is suppressed for
 * non-identity random addresses, and only the significant key bytes
 * (per negotiated encryption key size) are copied out.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK marks the key generated while acting as initiator */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
9681 
/* Emit a New Identity Resolving Key event carrying both the current
 * resolvable private address (RPA) and the identity address plus IRK.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	/* Zero the whole event so no stale stack bytes reach userspace */
	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
9697 
/* Emit a New Connection Signature Resolving Key event. The store hint
 * is suppressed for non-identity random addresses, mirroring the LTK
 * handling above.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
9727 
/* Emit a New Connection Parameter event for an LE peer. Only identity
 * addresses are reported since parameters for changing private
 * addresses are not worth storing.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	/* Parameters are little-endian on the wire */
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
9748 
/* Emit a Device Connected event, at most once per connection (guarded
 * by the HCI_CONN_MGMT_CONNECTED flag). The event's EIR payload is the
 * LE advertising data when present, otherwise the remote name and/or
 * class of device for BR/EDR.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	/* Only send the event once per connection */
	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* allocate buff for LE or BR/EDR adv */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	/* Flag locally initiated connections */
	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);

		/* Only append the class of device if it is non-zero */
		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class, sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}
9801 
9802 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9803 {
9804 	struct hci_dev *hdev = data;
9805 	struct mgmt_cp_unpair_device *cp = cmd->param;
9806 
9807 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9808 
9809 	cmd->cmd_complete(cmd, 0);
9810 }
9811 
9812 bool mgmt_powering_down(struct hci_dev *hdev)
9813 {
9814 	struct mgmt_pending_cmd *cmd;
9815 	struct mgmt_mode *cp;
9816 
9817 	if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
9818 		return true;
9819 
9820 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9821 	if (!cmd)
9822 		return false;
9823 
9824 	cp = cmd->param;
9825 	if (!cp->val)
9826 		return true;
9827 
9828 	return false;
9829 }
9830 
9831 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
9832 			      u8 link_type, u8 addr_type, u8 reason,
9833 			      bool mgmt_connected)
9834 {
9835 	struct mgmt_ev_device_disconnected ev;
9836 	struct sock *sk = NULL;
9837 
9838 	if (!mgmt_connected)
9839 		return;
9840 
9841 	if (link_type != ACL_LINK &&
9842 	    link_type != LE_LINK  &&
9843 	    link_type != BIS_LINK)
9844 		return;
9845 
9846 	bacpy(&ev.addr.bdaddr, bdaddr);
9847 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9848 	ev.reason = reason;
9849 
9850 	/* Report disconnects due to suspend */
9851 	if (hdev->suspended)
9852 		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
9853 
9854 	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
9855 
9856 	if (sk)
9857 		sock_put(sk);
9858 }
9859 
9860 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9861 			    u8 link_type, u8 addr_type, u8 status)
9862 {
9863 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9864 	struct mgmt_cp_disconnect *cp;
9865 	struct mgmt_pending_cmd *cmd;
9866 
9867 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, true,
9868 			     unpair_device_rsp, hdev);
9869 
9870 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9871 	if (!cmd)
9872 		return;
9873 
9874 	cp = cmd->param;
9875 
9876 	if (bacmp(bdaddr, &cp->addr.bdaddr))
9877 		return;
9878 
9879 	if (cp->addr.type != bdaddr_type)
9880 		return;
9881 
9882 	cmd->cmd_complete(cmd, mgmt_status(status));
9883 	mgmt_pending_remove(cmd);
9884 }
9885 
/* Report a failed connection attempt. If userspace had already been
 * told the device was connected, a Device Disconnected event is sent
 * instead of Connect Failed.
 */
void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		/* NOTE(review): the raw HCI status is forwarded as the
		 * disconnect "reason" here, not a MGMT_DEV_DISCONN_*
		 * value - confirm this is intentional.
		 */
		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
					 conn->dst_type, status, true);
		return;
	}

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
9902 
9903 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9904 {
9905 	struct mgmt_ev_pin_code_request ev;
9906 
9907 	bacpy(&ev.addr.bdaddr, bdaddr);
9908 	ev.addr.type = BDADDR_BREDR;
9909 	ev.secure = secure;
9910 
9911 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9912 }
9913 
/* Complete a pending PIN Code Reply command with the HCI status */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9926 
/* Complete a pending PIN Code Negative Reply command with the HCI status */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9939 
/* Ask userspace to confirm a pairing numeric comparison value.
 *
 * @confirm_hint: non-zero when a plain confirmation (no value display)
 *                is requested.
 * @value: the passkey value to show, little-endian on the wire.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9956 
/* Ask userspace to supply a passkey for the given peer address */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9970 
9971 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9972 				      u8 link_type, u8 addr_type, u8 status,
9973 				      u8 opcode)
9974 {
9975 	struct mgmt_pending_cmd *cmd;
9976 
9977 	cmd = pending_find(opcode, hdev);
9978 	if (!cmd)
9979 		return -ENOENT;
9980 
9981 	cmd->cmd_complete(cmd, mgmt_status(status));
9982 	mgmt_pending_remove(cmd);
9983 
9984 	return 0;
9985 }
9986 
/* Complete a pending User Confirm Reply command */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9993 
/* Complete a pending User Confirm Negative Reply command */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
10001 
/* Complete a pending User Passkey Reply command */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
10008 
/* Complete a pending User Passkey Negative Reply command */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
10016 
/* Notify userspace of the passkey to display during pairing.
 * @entered: number of digits the remote side has entered so far.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
10032 
/* Report an authentication failure for a connection and, if a pairing
 * command is pending for it, complete that command as well. The event
 * is skipped for the pairing command's own socket.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
10053 
/* Handle completion of the HCI Write Authentication Enable command:
 * sync the HCI_LINK_SECURITY flag with the controller state, answer
 * pending Set Link Security commands and broadcast new settings if the
 * flag actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	/* On failure, fail all pending Set Link Security commands */
	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH state into the mgmt flag;
	 * "changed" is true only if the flag value actually flipped.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
10080 
10081 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
10082 {
10083 	struct cmd_lookup *match = data;
10084 
10085 	if (match->sk == NULL) {
10086 		match->sk = cmd->sk;
10087 		sock_hold(match->sk);
10088 	}
10089 }
10090 
/* Handle completion of a Class of Device update. Finds the socket of
 * any command that could have triggered the change (Set Dev Class,
 * Add/Remove UUID) so it can be skipped when broadcasting, then
 * announces the new class on success.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Non-removing lookups: the commands stay pending, we only
	 * capture the first requester's socket.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, false, sk_lookup,
			     &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, false, sk_lookup,
			     &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, false, sk_lookup,
			     &match);

	if (!status) {
		/* Class of Device is always 3 bytes */
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
10112 
/* Handle completion of a local name update: store the name when it was
 * not requested via mgmt and broadcast a Local Name Changed event,
 * except during power on/off sequences.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* Name change came from the controller side (not a mgmt
		 * command), so record it in hdev directly.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this name change is part of a power on/off sequence
		 * (HCI_POWERING_DOWN set or Set Powered pending) don't
		 * send any mgmt signals.
		 */
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
			return;

		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
10143 
10144 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10145 {
10146 	int i;
10147 
10148 	for (i = 0; i < uuid_count; i++) {
10149 		if (!memcmp(uuid, uuids[i], 16))
10150 			return true;
10151 	}
10152 
10153 	return false;
10154 }
10155 
/* Walk the EIR/advertising data and return true if any contained
 * 16/32/128-bit service UUID matches an entry in uuids. 16 and 32 bit
 * UUIDs are expanded to 128 bit using the Bluetooth base UUID before
 * comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];	/* length of type byte + payload */
		u8 uuid[16];
		int i;

		/* Zero length terminates the EIR data */
		if (field_len == 0)
			break;

		/* Stop if the field claims more bytes than remain */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Each 16-bit UUID occupies bytes i+2..i+3 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* Each 32-bit UUID occupies bytes i+2..i+5 */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* 128-bit UUIDs are compared verbatim */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past length byte + field contents */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
10210 
/* Apply the active service-discovery filter (RSSI threshold and UUID
 * list) to a found device; return false when the result should be
 * dropped.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the EIR/adv data and
		 * the scan response are searched.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER)) {
		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
10253 
10254 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
10255 				  bdaddr_t *bdaddr, u8 addr_type)
10256 {
10257 	struct mgmt_ev_adv_monitor_device_lost ev;
10258 
10259 	ev.monitor_handle = cpu_to_le16(handle);
10260 	bacpy(&ev.addr.bdaddr, bdaddr);
10261 	ev.addr.type = addr_type;
10262 
10263 	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
10264 		   NULL);
10265 }
10266 
/* Build and send an ADV_MONITOR_DEVICE_FOUND event from an existing
 * DEVICE_FOUND skb by prefixing the monitor handle. The original skb is
 * left untouched (its data is copied).
 */
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	/* Event size = DEVICE_FOUND payload plus the extra fields that
	 * ADV_MONITOR_DEVICE_FOUND adds (the monitor handle).
	 */
	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}
10296 
/* Route a found-device skb to DEVICE_FOUND and/or ADV_MONITOR_DEVICE_FOUND
 * events depending on whether the kernel is scanning and which
 * Advertisement Monitors matched. Consumes the skb.
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	/* Recomputed below while scanning the monitored-devices list */
	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			/* First report for this device and monitor */
			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		/* Any still-unnotified device keeps the pending flag set */
		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
10360 
/* Emit a Mesh Device Found event for an LE advertisement, but only if
 * the advertisement (or scan response) contains one of the AD types the
 * mesh receiver registered interest in. An empty mesh_ad_types list
 * accepts everything.
 */
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	/* No filter configured - accept every advertisement */
	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		/* i steps over AD structures: eir[i] = length, eir[i+1] = type */
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				/* A zero entry terminates the filter list */
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	/* No requested AD type present - drop silently */
	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
10426 
/* Process a device found during inquiry/scanning: feed mesh, apply
 * discovery filters (RSSI/UUID/limited-discoverable), then build the
 * Device Found event and hand it to the advertisement-monitor router.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	/* Mesh receivers get their own copy of every LE advertisement */
	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Append the class of device as an EIR field unless the EIR data
	 * already carries one.
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
10518 
10519 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10520 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
10521 {
10522 	struct sk_buff *skb;
10523 	struct mgmt_ev_device_found *ev;
10524 	u16 eir_len = 0;
10525 	u32 flags = 0;
10526 
10527 	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10528 			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
10529 	if (!skb)
10530 		return;
10531 
10532 	ev = skb_put(skb, sizeof(*ev));
10533 	bacpy(&ev->addr.bdaddr, bdaddr);
10534 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
10535 	ev->rssi = rssi;
10536 
10537 	if (name)
10538 		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
10539 	else
10540 		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
10541 
10542 	ev->eir_len = cpu_to_le16(eir_len);
10543 	ev->flags = cpu_to_le32(flags);
10544 
10545 	mgmt_event_skb(skb, NULL);
10546 }
10547 
10548 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
10549 {
10550 	struct mgmt_ev_discovering ev;
10551 
10552 	bt_dev_dbg(hdev, "discovering %u", discovering);
10553 
10554 	memset(&ev, 0, sizeof(ev));
10555 	ev.type = hdev->discovery.type;
10556 	ev.discovering = discovering;
10557 
10558 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
10559 }
10560 
10561 void mgmt_suspending(struct hci_dev *hdev, u8 state)
10562 {
10563 	struct mgmt_ev_controller_suspend ev;
10564 
10565 	ev.suspend_state = state;
10566 	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
10567 }
10568 
10569 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
10570 		   u8 addr_type)
10571 {
10572 	struct mgmt_ev_controller_resume ev;
10573 
10574 	ev.wake_reason = reason;
10575 	if (bdaddr) {
10576 		bacpy(&ev.addr.bdaddr, bdaddr);
10577 		ev.addr.type = addr_type;
10578 	} else {
10579 		memset(&ev.addr, 0, sizeof(ev.addr));
10580 	}
10581 
10582 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
10583 }
10584 
/* Control channel descriptor: routes commands arriving on
 * HCI_CHANNEL_CONTROL sockets through the mgmt_handlers table and
 * provides the per-hdev init callback.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
10591 
/* Register the management control channel with the HCI core.
 * Returns 0 on success or a negative error from the registration.
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
10596 
/* Unregister the management control channel from the HCI core. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
10601 
10602 void mgmt_cleanup(struct sock *sk)
10603 {
10604 	struct mgmt_mesh_tx *mesh_tx;
10605 	struct hci_dev *hdev;
10606 
10607 	read_lock(&hci_dev_list_lock);
10608 
10609 	list_for_each_entry(hdev, &hci_dev_list, list) {
10610 		do {
10611 			mesh_tx = mgmt_mesh_next(hdev, sk);
10612 
10613 			if (mesh_tx)
10614 				mesh_send_complete(hdev, mesh_tx, true);
10615 		} while (mesh_tx);
10616 	}
10617 
10618 	read_unlock(&hci_dev_list_lock);
10619 }
10620