xref: /linux/net/bluetooth/mgmt.c (revision 7bb377107c72a40ab7505341f8626c8eb79a0cb7)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 
40 #define MGMT_VERSION	1
41 #define MGMT_REVISION	17
42 
43 static const u16 mgmt_commands[] = {
44 	MGMT_OP_READ_INDEX_LIST,
45 	MGMT_OP_READ_INFO,
46 	MGMT_OP_SET_POWERED,
47 	MGMT_OP_SET_DISCOVERABLE,
48 	MGMT_OP_SET_CONNECTABLE,
49 	MGMT_OP_SET_FAST_CONNECTABLE,
50 	MGMT_OP_SET_BONDABLE,
51 	MGMT_OP_SET_LINK_SECURITY,
52 	MGMT_OP_SET_SSP,
53 	MGMT_OP_SET_HS,
54 	MGMT_OP_SET_LE,
55 	MGMT_OP_SET_DEV_CLASS,
56 	MGMT_OP_SET_LOCAL_NAME,
57 	MGMT_OP_ADD_UUID,
58 	MGMT_OP_REMOVE_UUID,
59 	MGMT_OP_LOAD_LINK_KEYS,
60 	MGMT_OP_LOAD_LONG_TERM_KEYS,
61 	MGMT_OP_DISCONNECT,
62 	MGMT_OP_GET_CONNECTIONS,
63 	MGMT_OP_PIN_CODE_REPLY,
64 	MGMT_OP_PIN_CODE_NEG_REPLY,
65 	MGMT_OP_SET_IO_CAPABILITY,
66 	MGMT_OP_PAIR_DEVICE,
67 	MGMT_OP_CANCEL_PAIR_DEVICE,
68 	MGMT_OP_UNPAIR_DEVICE,
69 	MGMT_OP_USER_CONFIRM_REPLY,
70 	MGMT_OP_USER_CONFIRM_NEG_REPLY,
71 	MGMT_OP_USER_PASSKEY_REPLY,
72 	MGMT_OP_USER_PASSKEY_NEG_REPLY,
73 	MGMT_OP_READ_LOCAL_OOB_DATA,
74 	MGMT_OP_ADD_REMOTE_OOB_DATA,
75 	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
76 	MGMT_OP_START_DISCOVERY,
77 	MGMT_OP_STOP_DISCOVERY,
78 	MGMT_OP_CONFIRM_NAME,
79 	MGMT_OP_BLOCK_DEVICE,
80 	MGMT_OP_UNBLOCK_DEVICE,
81 	MGMT_OP_SET_DEVICE_ID,
82 	MGMT_OP_SET_ADVERTISING,
83 	MGMT_OP_SET_BREDR,
84 	MGMT_OP_SET_STATIC_ADDRESS,
85 	MGMT_OP_SET_SCAN_PARAMS,
86 	MGMT_OP_SET_SECURE_CONN,
87 	MGMT_OP_SET_DEBUG_KEYS,
88 	MGMT_OP_SET_PRIVACY,
89 	MGMT_OP_LOAD_IRKS,
90 	MGMT_OP_GET_CONN_INFO,
91 	MGMT_OP_GET_CLOCK_INFO,
92 	MGMT_OP_ADD_DEVICE,
93 	MGMT_OP_REMOVE_DEVICE,
94 	MGMT_OP_LOAD_CONN_PARAM,
95 	MGMT_OP_READ_UNCONF_INDEX_LIST,
96 	MGMT_OP_READ_CONFIG_INFO,
97 	MGMT_OP_SET_EXTERNAL_CONFIG,
98 	MGMT_OP_SET_PUBLIC_ADDRESS,
99 	MGMT_OP_START_SERVICE_DISCOVERY,
100 	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
101 	MGMT_OP_READ_EXT_INDEX_LIST,
102 	MGMT_OP_READ_ADV_FEATURES,
103 	MGMT_OP_ADD_ADVERTISING,
104 	MGMT_OP_REMOVE_ADVERTISING,
105 	MGMT_OP_GET_ADV_SIZE_INFO,
106 	MGMT_OP_START_LIMITED_DISCOVERY,
107 	MGMT_OP_READ_EXT_INFO,
108 	MGMT_OP_SET_APPEARANCE,
109 	MGMT_OP_SET_BLOCKED_KEYS,
110 	MGMT_OP_SET_WIDEBAND_SPEECH,
111 	MGMT_OP_READ_SECURITY_INFO,
112 };
113 
114 static const u16 mgmt_events[] = {
115 	MGMT_EV_CONTROLLER_ERROR,
116 	MGMT_EV_INDEX_ADDED,
117 	MGMT_EV_INDEX_REMOVED,
118 	MGMT_EV_NEW_SETTINGS,
119 	MGMT_EV_CLASS_OF_DEV_CHANGED,
120 	MGMT_EV_LOCAL_NAME_CHANGED,
121 	MGMT_EV_NEW_LINK_KEY,
122 	MGMT_EV_NEW_LONG_TERM_KEY,
123 	MGMT_EV_DEVICE_CONNECTED,
124 	MGMT_EV_DEVICE_DISCONNECTED,
125 	MGMT_EV_CONNECT_FAILED,
126 	MGMT_EV_PIN_CODE_REQUEST,
127 	MGMT_EV_USER_CONFIRM_REQUEST,
128 	MGMT_EV_USER_PASSKEY_REQUEST,
129 	MGMT_EV_AUTH_FAILED,
130 	MGMT_EV_DEVICE_FOUND,
131 	MGMT_EV_DISCOVERING,
132 	MGMT_EV_DEVICE_BLOCKED,
133 	MGMT_EV_DEVICE_UNBLOCKED,
134 	MGMT_EV_DEVICE_UNPAIRED,
135 	MGMT_EV_PASSKEY_NOTIFY,
136 	MGMT_EV_NEW_IRK,
137 	MGMT_EV_NEW_CSRK,
138 	MGMT_EV_DEVICE_ADDED,
139 	MGMT_EV_DEVICE_REMOVED,
140 	MGMT_EV_NEW_CONN_PARAM,
141 	MGMT_EV_UNCONF_INDEX_ADDED,
142 	MGMT_EV_UNCONF_INDEX_REMOVED,
143 	MGMT_EV_NEW_CONFIG_OPTIONS,
144 	MGMT_EV_EXT_INDEX_ADDED,
145 	MGMT_EV_EXT_INDEX_REMOVED,
146 	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
147 	MGMT_EV_ADVERTISING_ADDED,
148 	MGMT_EV_ADVERTISING_REMOVED,
149 	MGMT_EV_EXT_INFO_CHANGED,
150 };
151 
152 static const u16 mgmt_untrusted_commands[] = {
153 	MGMT_OP_READ_INDEX_LIST,
154 	MGMT_OP_READ_INFO,
155 	MGMT_OP_READ_UNCONF_INDEX_LIST,
156 	MGMT_OP_READ_CONFIG_INFO,
157 	MGMT_OP_READ_EXT_INDEX_LIST,
158 	MGMT_OP_READ_EXT_INFO,
159 	MGMT_OP_READ_SECURITY_INFO,
160 };
161 
162 static const u16 mgmt_untrusted_events[] = {
163 	MGMT_EV_INDEX_ADDED,
164 	MGMT_EV_INDEX_REMOVED,
165 	MGMT_EV_NEW_SETTINGS,
166 	MGMT_EV_CLASS_OF_DEV_CHANGED,
167 	MGMT_EV_LOCAL_NAME_CHANGED,
168 	MGMT_EV_UNCONF_INDEX_ADDED,
169 	MGMT_EV_UNCONF_INDEX_REMOVED,
170 	MGMT_EV_NEW_CONFIG_OPTIONS,
171 	MGMT_EV_EXT_INDEX_ADDED,
172 	MGMT_EV_EXT_INDEX_REMOVED,
173 	MGMT_EV_EXT_INFO_CHANGED,
174 };
175 
176 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
177 
178 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
179 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
180 
181 /* HCI to MGMT error code conversion table */
182 static const u8 mgmt_status_table[] = {
183 	MGMT_STATUS_SUCCESS,
184 	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
185 	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
186 	MGMT_STATUS_FAILED,		/* Hardware Failure */
187 	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
188 	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
189 	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
190 	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
191 	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
192 	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
193 	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
194 	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
195 	MGMT_STATUS_BUSY,		/* Command Disallowed */
196 	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
197 	MGMT_STATUS_REJECTED,		/* Rejected Security */
198 	MGMT_STATUS_REJECTED,		/* Rejected Personal */
199 	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
200 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
201 	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
202 	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
203 	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
204 	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
205 	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
206 	MGMT_STATUS_BUSY,		/* Repeated Attempts */
207 	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
208 	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
209 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
210 	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
211 	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
212 	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
213 	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
214 	MGMT_STATUS_FAILED,		/* Unspecified Error */
215 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
216 	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
217 	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
218 	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
219 	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
220 	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
221 	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
222 	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
223 	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
224 	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
225 	MGMT_STATUS_FAILED,		/* Transaction Collision */
226 	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
227 	MGMT_STATUS_REJECTED,		/* QoS Rejected */
228 	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
229 	MGMT_STATUS_REJECTED,		/* Insufficient Security */
230 	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
231 	MGMT_STATUS_BUSY,		/* Role Switch Pending */
232 	MGMT_STATUS_FAILED,		/* Slot Violation */
233 	MGMT_STATUS_FAILED,		/* Role Switch Failed */
234 	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
235 	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
236 	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
237 	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
238 	MGMT_STATUS_BUSY,		/* Controller Busy */
239 	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
240 	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
241 	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
242 	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
243 	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
244 };
245 
246 static u8 mgmt_status(u8 hci_status)
247 {
248 	if (hci_status < ARRAY_SIZE(mgmt_status_table))
249 		return mgmt_status_table[hci_status];
250 
251 	return MGMT_STATUS_FAILED;
252 }
253 
/* Broadcast an index-related event on the MGMT control channel to every
 * socket whose flags match @flag; no socket is excluded (skip_sk is NULL).
 */
254 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
255 			    u16 len, int flag)
256 {
257 	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
258 			       flag, NULL);
259 }
260 
/* Like mgmt_index_event() but additionally allows one socket (@skip_sk,
 * typically the originator of the change) to be excluded from the
 * broadcast.
 */
261 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
262 			      u16 len, int flag, struct sock *skip_sk)
263 {
264 	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
265 			       flag, skip_sk);
266 }
267 
/* Broadcast an event on the control channel to all trusted sockets,
 * optionally skipping @skip_sk.
 */
268 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
269 		      struct sock *skip_sk)
270 {
271 	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
272 			       HCI_SOCK_TRUSTED, skip_sk);
273 }
274 
275 static u8 le_addr_type(u8 mgmt_addr_type)
276 {
277 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
278 		return ADDR_LE_DEV_PUBLIC;
279 	else
280 		return ADDR_LE_DEV_RANDOM;
281 }
282 
/* Fill a Read Version response with the MGMT interface version and
 * revision; the revision is encoded little-endian on the wire.
 */
283 void mgmt_fill_version_info(void *ver)
284 {
285 	struct mgmt_rp_read_version *rp = ver;
286 
287 	rp->version = MGMT_VERSION;
288 	rp->revision = cpu_to_le16(MGMT_REVISION);
289 }
290 
/* MGMT_OP_READ_VERSION handler. Controller-independent (hdev and data
 * are unused); always replies with status 0.
 */
291 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
292 			u16 data_len)
293 {
294 	struct mgmt_rp_read_version rp;
295 
296 	BT_DBG("sock %p", sk);
297 
298 	mgmt_fill_version_info(&rp);
299 
300 	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
301 				 &rp, sizeof(rp));
302 }
303 
304 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
305 			 u16 data_len)
306 {
307 	struct mgmt_rp_read_commands *rp;
308 	u16 num_commands, num_events;
309 	size_t rp_size;
310 	int i, err;
311 
312 	BT_DBG("sock %p", sk);
313 
314 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
315 		num_commands = ARRAY_SIZE(mgmt_commands);
316 		num_events = ARRAY_SIZE(mgmt_events);
317 	} else {
318 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
319 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
320 	}
321 
322 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
323 
324 	rp = kmalloc(rp_size, GFP_KERNEL);
325 	if (!rp)
326 		return -ENOMEM;
327 
328 	rp->num_commands = cpu_to_le16(num_commands);
329 	rp->num_events = cpu_to_le16(num_events);
330 
331 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
332 		__le16 *opcode = rp->opcodes;
333 
334 		for (i = 0; i < num_commands; i++, opcode++)
335 			put_unaligned_le16(mgmt_commands[i], opcode);
336 
337 		for (i = 0; i < num_events; i++, opcode++)
338 			put_unaligned_le16(mgmt_events[i], opcode);
339 	} else {
340 		__le16 *opcode = rp->opcodes;
341 
342 		for (i = 0; i < num_commands; i++, opcode++)
343 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
344 
345 		for (i = 0; i < num_events; i++, opcode++)
346 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
347 	}
348 
349 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
350 				rp, rp_size);
351 	kfree(rp);
352 
353 	return err;
354 }
355 
/* MGMT_OP_READ_INDEX_LIST handler: return the IDs of all configured
 * primary controllers. The entire list walk runs under
 * hci_dev_list_lock, which is why the reply buffer must be allocated
 * with GFP_ATOMIC.
 */
356 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
357 			   u16 data_len)
358 {
359 	struct mgmt_rp_read_index_list *rp;
360 	struct hci_dev *d;
361 	size_t rp_len;
362 	u16 count;
363 	int err;
364 
365 	BT_DBG("sock %p", sk);
366 
367 	read_lock(&hci_dev_list_lock);
368 
369 	count = 0;
370 	list_for_each_entry(d, &hci_dev_list, list) {
371 		if (d->dev_type == HCI_PRIMARY &&
372 		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
373 			count++;
374 	}
375 
376 	rp_len = sizeof(*rp) + (2 * count);
377 	rp = kmalloc(rp_len, GFP_ATOMIC);
378 	if (!rp) {
379 		read_unlock(&hci_dev_list_lock);
380 		return -ENOMEM;
381 	}
382 
	/* Second pass fills in the indexes. Its filters are a strict
	 * superset of the counting pass above, so it can only produce
	 * fewer entries than were allocated, never more.
	 */
383 	count = 0;
384 	list_for_each_entry(d, &hci_dev_list, list) {
385 		if (hci_dev_test_flag(d, HCI_SETUP) ||
386 		    hci_dev_test_flag(d, HCI_CONFIG) ||
387 		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
388 			continue;
389 
390 		/* Devices marked as raw-only are neither configured
391 		 * nor unconfigured controllers.
392 		 */
393 		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
394 			continue;
395 
396 		if (d->dev_type == HCI_PRIMARY &&
397 		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
398 			rp->index[count++] = cpu_to_le16(d->id);
399 			BT_DBG("Added hci%u", d->id);
400 		}
401 	}
402 
	/* Recompute the reply length from the (possibly smaller) final
	 * count before sending.
	 */
403 	rp->num_controllers = cpu_to_le16(count);
404 	rp_len = sizeof(*rp) + (2 * count);
405 
406 	read_unlock(&hci_dev_list_lock);
407 
408 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
409 				0, rp, rp_len);
410 
411 	kfree(rp);
412 
413 	return err;
414 }
415 
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: like read_index_list() but
 * returns only primary controllers still flagged HCI_UNCONFIGURED.
 * Runs under hci_dev_list_lock, hence the GFP_ATOMIC allocation.
 */
416 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
417 				  void *data, u16 data_len)
418 {
419 	struct mgmt_rp_read_unconf_index_list *rp;
420 	struct hci_dev *d;
421 	size_t rp_len;
422 	u16 count;
423 	int err;
424 
425 	BT_DBG("sock %p", sk);
426 
427 	read_lock(&hci_dev_list_lock);
428 
429 	count = 0;
430 	list_for_each_entry(d, &hci_dev_list, list) {
431 		if (d->dev_type == HCI_PRIMARY &&
432 		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
433 			count++;
434 	}
435 
436 	rp_len = sizeof(*rp) + (2 * count);
437 	rp = kmalloc(rp_len, GFP_ATOMIC);
438 	if (!rp) {
439 		read_unlock(&hci_dev_list_lock);
440 		return -ENOMEM;
441 	}
442 
	/* Second pass applies stricter filters than the counting pass,
	 * so the buffer sized above can never overflow.
	 */
443 	count = 0;
444 	list_for_each_entry(d, &hci_dev_list, list) {
445 		if (hci_dev_test_flag(d, HCI_SETUP) ||
446 		    hci_dev_test_flag(d, HCI_CONFIG) ||
447 		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
448 			continue;
449 
450 		/* Devices marked as raw-only are neither configured
451 		 * nor unconfigured controllers.
452 		 */
453 		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
454 			continue;
455 
456 		if (d->dev_type == HCI_PRIMARY &&
457 		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
458 			rp->index[count++] = cpu_to_le16(d->id);
459 			BT_DBG("Added hci%u", d->id);
460 		}
461 	}
462 
463 	rp->num_controllers = cpu_to_le16(count);
464 	rp_len = sizeof(*rp) + (2 * count);
465 
466 	read_unlock(&hci_dev_list_lock);
467 
468 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
469 				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
470 
471 	kfree(rp);
472 
473 	return err;
474 }
475 
/* MGMT_OP_READ_EXT_INDEX_LIST handler: list primary and AMP controllers
 * with a type byte (0x00 configured primary, 0x01 unconfigured primary,
 * 0x02 AMP) plus the bus type. As a side effect, switches this socket
 * over to extended index events only.
 */
476 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
477 			       void *data, u16 data_len)
478 {
479 	struct mgmt_rp_read_ext_index_list *rp;
480 	struct hci_dev *d;
481 	u16 count;
482 	int err;
483 
484 	BT_DBG("sock %p", sk);
485 
486 	read_lock(&hci_dev_list_lock);
487 
488 	count = 0;
489 	list_for_each_entry(d, &hci_dev_list, list) {
490 		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
491 			count++;
492 	}
493 
	/* struct_size() computes sizeof(*rp) + count * sizeof(entry) with
	 * overflow checking; GFP_ATOMIC because we hold the list lock.
	 */
494 	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
495 	if (!rp) {
496 		read_unlock(&hci_dev_list_lock);
497 		return -ENOMEM;
498 	}
499 
500 	count = 0;
501 	list_for_each_entry(d, &hci_dev_list, list) {
502 		if (hci_dev_test_flag(d, HCI_SETUP) ||
503 		    hci_dev_test_flag(d, HCI_CONFIG) ||
504 		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
505 			continue;
506 
507 		/* Devices marked as raw-only are neither configured
508 		 * nor unconfigured controllers.
509 		 */
510 		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
511 			continue;
512 
513 		if (d->dev_type == HCI_PRIMARY) {
514 			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
515 				rp->entry[count].type = 0x01;
516 			else
517 				rp->entry[count].type = 0x00;
518 		} else if (d->dev_type == HCI_AMP) {
519 			rp->entry[count].type = 0x02;
520 		} else {
521 			continue;
522 		}
523 
524 		rp->entry[count].bus = d->bus;
525 		rp->entry[count++].index = cpu_to_le16(d->id);
526 		BT_DBG("Added hci%u", d->id);
527 	}
528 
529 	rp->num_controllers = cpu_to_le16(count);
530 
531 	read_unlock(&hci_dev_list_lock);
532 
533 	/* If this command is called at least once, then all the
534 	 * default index and unconfigured index events are disabled
535 	 * and from now on only extended index events are used.
536 	 */
537 	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
538 	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
539 	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
540 
541 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
542 				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
543 				struct_size(rp, entry, count));
544 
545 	kfree(rp);
546 
547 	return err;
548 }
549 
550 static bool is_configured(struct hci_dev *hdev)
551 {
552 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
553 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
554 		return false;
555 
556 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
557 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
558 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
559 		return false;
560 
561 	return true;
562 }
563 
/* Return, little-endian encoded, the option bits that still need to be
 * configured before the controller becomes usable: external
 * configuration and/or a public address, depending on the quirks set.
 */
564 static __le32 get_missing_options(struct hci_dev *hdev)
565 {
566 	u32 options = 0;
567 
568 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
569 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
570 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
571 
572 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
573 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
574 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
575 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
576 
577 	return cpu_to_le32(options);
578 }
579 
/* Broadcast a New Configuration Options event carrying the currently
 * missing options to sockets that enabled option events, skipping @skip.
 */
580 static int new_options(struct hci_dev *hdev, struct sock *skip)
581 {
582 	__le32 options = get_missing_options(hdev);
583 
584 	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
585 				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
586 }
587 
/* Reply to @opcode with the controller's currently missing configuration
 * options as the (success) command-complete parameter.
 */
588 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
589 {
590 	__le32 options = get_missing_options(hdev);
591 
592 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
593 				 sizeof(options));
594 }
595 
/* MGMT_OP_READ_CONFIG_INFO handler: report the manufacturer ID plus
 * which configuration options the controller supports and which are
 * still missing.
 */
596 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
597 			    void *data, u16 data_len)
598 {
599 	struct mgmt_rp_read_config_info rp;
600 	u32 options = 0;
601 
602 	BT_DBG("sock %p %s", sk, hdev->name);
603 
604 	hci_dev_lock(hdev);
605 
606 	memset(&rp, 0, sizeof(rp));
607 	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
608 
609 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
610 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
611 
	/* A public address is only changeable if the driver provides a
	 * set_bdaddr callback.
	 */
612 	if (hdev->set_bdaddr)
613 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
614 
615 	rp.supported_options = cpu_to_le32(options);
616 	rp.missing_options = get_missing_options(hdev);
617 
618 	hci_dev_unlock(hdev);
619 
620 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
621 				 &rp, sizeof(rp));
622 }
623 
624 static u32 get_supported_phys(struct hci_dev *hdev)
625 {
626 	u32 supported_phys = 0;
627 
628 	if (lmp_bredr_capable(hdev)) {
629 		supported_phys |= MGMT_PHY_BR_1M_1SLOT;
630 
631 		if (hdev->features[0][0] & LMP_3SLOT)
632 			supported_phys |= MGMT_PHY_BR_1M_3SLOT;
633 
634 		if (hdev->features[0][0] & LMP_5SLOT)
635 			supported_phys |= MGMT_PHY_BR_1M_5SLOT;
636 
637 		if (lmp_edr_2m_capable(hdev)) {
638 			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
639 
640 			if (lmp_edr_3slot_capable(hdev))
641 				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
642 
643 			if (lmp_edr_5slot_capable(hdev))
644 				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
645 
646 			if (lmp_edr_3m_capable(hdev)) {
647 				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
648 
649 				if (lmp_edr_3slot_capable(hdev))
650 					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
651 
652 				if (lmp_edr_5slot_capable(hdev))
653 					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
654 			}
655 		}
656 	}
657 
658 	if (lmp_le_capable(hdev)) {
659 		supported_phys |= MGMT_PHY_LE_1M_TX;
660 		supported_phys |= MGMT_PHY_LE_1M_RX;
661 
662 		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
663 			supported_phys |= MGMT_PHY_LE_2M_TX;
664 			supported_phys |= MGMT_PHY_LE_2M_RX;
665 		}
666 
667 		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
668 			supported_phys |= MGMT_PHY_LE_CODED_TX;
669 			supported_phys |= MGMT_PHY_LE_CODED_RX;
670 		}
671 	}
672 
673 	return supported_phys;
674 }
675 
676 static u32 get_selected_phys(struct hci_dev *hdev)
677 {
678 	u32 selected_phys = 0;
679 
680 	if (lmp_bredr_capable(hdev)) {
681 		selected_phys |= MGMT_PHY_BR_1M_1SLOT;
682 
683 		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
684 			selected_phys |= MGMT_PHY_BR_1M_3SLOT;
685 
686 		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
687 			selected_phys |= MGMT_PHY_BR_1M_5SLOT;
688 
689 		if (lmp_edr_2m_capable(hdev)) {
690 			if (!(hdev->pkt_type & HCI_2DH1))
691 				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
692 
693 			if (lmp_edr_3slot_capable(hdev) &&
694 			    !(hdev->pkt_type & HCI_2DH3))
695 				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
696 
697 			if (lmp_edr_5slot_capable(hdev) &&
698 			    !(hdev->pkt_type & HCI_2DH5))
699 				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
700 
701 			if (lmp_edr_3m_capable(hdev)) {
702 				if (!(hdev->pkt_type & HCI_3DH1))
703 					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
704 
705 				if (lmp_edr_3slot_capable(hdev) &&
706 				    !(hdev->pkt_type & HCI_3DH3))
707 					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
708 
709 				if (lmp_edr_5slot_capable(hdev) &&
710 				    !(hdev->pkt_type & HCI_3DH5))
711 					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
712 			}
713 		}
714 	}
715 
716 	if (lmp_le_capable(hdev)) {
717 		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
718 			selected_phys |= MGMT_PHY_LE_1M_TX;
719 
720 		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
721 			selected_phys |= MGMT_PHY_LE_1M_RX;
722 
723 		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
724 			selected_phys |= MGMT_PHY_LE_2M_TX;
725 
726 		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
727 			selected_phys |= MGMT_PHY_LE_2M_RX;
728 
729 		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
730 			selected_phys |= MGMT_PHY_LE_CODED_TX;
731 
732 		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
733 			selected_phys |= MGMT_PHY_LE_CODED_RX;
734 	}
735 
736 	return selected_phys;
737 }
738 
739 static u32 get_configurable_phys(struct hci_dev *hdev)
740 {
741 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
742 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
743 }
744 
745 static u32 get_supported_settings(struct hci_dev *hdev)
746 {
747 	u32 settings = 0;
748 
749 	settings |= MGMT_SETTING_POWERED;
750 	settings |= MGMT_SETTING_BONDABLE;
751 	settings |= MGMT_SETTING_DEBUG_KEYS;
752 	settings |= MGMT_SETTING_CONNECTABLE;
753 	settings |= MGMT_SETTING_DISCOVERABLE;
754 
755 	if (lmp_bredr_capable(hdev)) {
756 		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
757 			settings |= MGMT_SETTING_FAST_CONNECTABLE;
758 		settings |= MGMT_SETTING_BREDR;
759 		settings |= MGMT_SETTING_LINK_SECURITY;
760 
761 		if (lmp_ssp_capable(hdev)) {
762 			settings |= MGMT_SETTING_SSP;
763 			settings |= MGMT_SETTING_HS;
764 		}
765 
766 		if (lmp_sc_capable(hdev))
767 			settings |= MGMT_SETTING_SECURE_CONN;
768 
769 		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
770 			     &hdev->quirks))
771 			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
772 	}
773 
774 	if (lmp_le_capable(hdev)) {
775 		settings |= MGMT_SETTING_LE;
776 		settings |= MGMT_SETTING_ADVERTISING;
777 		settings |= MGMT_SETTING_SECURE_CONN;
778 		settings |= MGMT_SETTING_PRIVACY;
779 		settings |= MGMT_SETTING_STATIC_ADDRESS;
780 	}
781 
782 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
783 	    hdev->set_bdaddr)
784 		settings |= MGMT_SETTING_CONFIGURATION;
785 
786 	settings |= MGMT_SETTING_PHY_CONFIGURATION;
787 
788 	return settings;
789 }
790 
791 static u32 get_current_settings(struct hci_dev *hdev)
792 {
793 	u32 settings = 0;
794 
795 	if (hdev_is_powered(hdev))
796 		settings |= MGMT_SETTING_POWERED;
797 
798 	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
799 		settings |= MGMT_SETTING_CONNECTABLE;
800 
801 	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
802 		settings |= MGMT_SETTING_FAST_CONNECTABLE;
803 
804 	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
805 		settings |= MGMT_SETTING_DISCOVERABLE;
806 
807 	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
808 		settings |= MGMT_SETTING_BONDABLE;
809 
810 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
811 		settings |= MGMT_SETTING_BREDR;
812 
813 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
814 		settings |= MGMT_SETTING_LE;
815 
816 	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
817 		settings |= MGMT_SETTING_LINK_SECURITY;
818 
819 	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
820 		settings |= MGMT_SETTING_SSP;
821 
822 	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
823 		settings |= MGMT_SETTING_HS;
824 
825 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
826 		settings |= MGMT_SETTING_ADVERTISING;
827 
828 	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
829 		settings |= MGMT_SETTING_SECURE_CONN;
830 
831 	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
832 		settings |= MGMT_SETTING_DEBUG_KEYS;
833 
834 	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
835 		settings |= MGMT_SETTING_PRIVACY;
836 
837 	/* The current setting for static address has two purposes. The
838 	 * first is to indicate if the static address will be used and
839 	 * the second is to indicate if it is actually set.
840 	 *
841 	 * This means if the static address is not configured, this flag
842 	 * will never be set. If the address is configured, then if the
843 	 * address is actually used decides if the flag is set or not.
844 	 *
845 	 * For single mode LE only controllers and dual-mode controllers
846 	 * with BR/EDR disabled, the existence of the static address will
847 	 * be evaluated.
848 	 */
849 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
850 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
851 	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
852 		if (bacmp(&hdev->static_addr, BDADDR_ANY))
853 			settings |= MGMT_SETTING_STATIC_ADDRESS;
854 	}
855 
856 	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
857 		settings |= MGMT_SETTING_WIDEBAND_SPEECH;
858 
859 	return settings;
860 }
861 
/* Look up a pending MGMT command of @opcode for @hdev on the control
 * channel.
 */
862 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
863 {
864 	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
865 }
866 
/* Like pending_find() but additionally matches on the command's
 * user_data pointer.
 */
867 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
868 						  struct hci_dev *hdev,
869 						  const void *data)
870 {
871 	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
872 }
873 
/* Return the LE advertising flag matching the discoverable state:
 * LE_AD_GENERAL, LE_AD_LIMITED, or 0 when not discoverable. A pending
 * Set Discoverable command takes precedence over the device flags
 * (0x01 = general, 0x02 = limited in the command parameter).
 */
874 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
875 {
876 	struct mgmt_pending_cmd *cmd;
877 
878 	/* If there's a pending mgmt command the flags will not yet have
879 	 * their final values, so check for this first.
880 	 */
881 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
882 	if (cmd) {
883 		struct mgmt_mode *cp = cmd->param;
884 		if (cp->val == 0x01)
885 			return LE_AD_GENERAL;
886 		else if (cp->val == 0x02)
887 			return LE_AD_LIMITED;
888 	} else {
889 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
890 			return LE_AD_LIMITED;
891 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
892 			return LE_AD_GENERAL;
893 	}
894 
895 	return 0;
896 }
897 
/* Return the effective connectable state: the value of a pending Set
 * Connectable command if one exists, otherwise the HCI_CONNECTABLE
 * device flag.
 */
898 bool mgmt_get_connectable(struct hci_dev *hdev)
899 {
900 	struct mgmt_pending_cmd *cmd;
901 
902 	/* If there's a pending mgmt command the flag will not yet have
903 	 * it's final value, so check for this first.
904 	 */
905 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
906 	if (cmd) {
907 		struct mgmt_mode *cp = cmd->param;
908 
909 		return cp->val;
910 	}
911 
912 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
913 }
914 
/* Delayed-work handler: once the service cache period ends, queue an
 * HCI request pushing the real EIR data and class of device to the
 * controller. Does nothing if HCI_SERVICE_CACHE was already cleared.
 */
915 static void service_cache_off(struct work_struct *work)
916 {
917 	struct hci_dev *hdev = container_of(work, struct hci_dev,
918 					    service_cache.work);
919 	struct hci_request req;
920 
921 	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
922 		return;
923 
924 	hci_req_init(&req, hdev);
925 
926 	hci_dev_lock(hdev);
927 
928 	__hci_req_update_eir(&req);
929 	__hci_req_update_class(&req);
930 
931 	hci_dev_unlock(hdev);
932 
933 	hci_req_run(&req, NULL);
934 }
935 
/* Delayed-work handler: mark the resolvable private address as expired
 * and, if advertising is active, restart advertising so a fresh RPA is
 * generated and programmed into the controller.
 */
936 static void rpa_expired(struct work_struct *work)
937 {
938 	struct hci_dev *hdev = container_of(work, struct hci_dev,
939 					    rpa_expired.work);
940 	struct hci_request req;
941 
942 	BT_DBG("");
943 
944 	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
945 
946 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
947 		return;
948 
949 	/* The generation of a new RPA and programming it into the
950 	 * controller happens in the hci_req_enable_advertising()
951 	 * function.
952 	 */
953 	hci_req_init(&req, hdev);
954 	if (ext_adv_capable(hdev))
955 		__hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
956 	else
957 		__hci_req_enable_advertising(&req);
958 	hci_req_run(&req, NULL);
959 }
960 
/* One-time per-controller MGMT setup, performed on first MGMT use.
 * Idempotent: the HCI_MGMT flag guards against repeat initialization.
 */
961 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
962 {
963 	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
964 		return;
965 
966 	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
967 	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
968 
969 	/* Non-mgmt controlled devices get this bit set
970 	 * implicitly so that pairing works for them, however
971 	 * for mgmt we require user-space to explicitly enable
972 	 * it
973 	 */
974 	hci_dev_clear_flag(hdev, HCI_BONDABLE);
975 }
976 
/* MGMT_OP_READ_INFO handler: report address, HCI version, manufacturer,
 * supported/current settings, class of device and both names for @hdev.
 */
977 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
978 				void *data, u16 data_len)
979 {
980 	struct mgmt_rp_read_info rp;
981 
982 	BT_DBG("sock %p %s", sk, hdev->name);
983 
984 	hci_dev_lock(hdev);
985 
986 	memset(&rp, 0, sizeof(rp));
987 
988 	bacpy(&rp.bdaddr, &hdev->bdaddr);
989 
990 	rp.version = hdev->hci_ver;
991 	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
992 
993 	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
994 	rp.current_settings = cpu_to_le32(get_current_settings(hdev));
995 
996 	memcpy(rp.dev_class, hdev->dev_class, 3);
997 
998 	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
999 	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1000 
1001 	hci_dev_unlock(hdev);
1002 
1003 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1004 				 sizeof(rp));
1005 }
1006 
/* Append class of device (BR/EDR only), appearance (LE only) and both
 * device names as EIR structures to @eir, returning the number of bytes
 * written. NOTE(review): the caller must size @eir for all four fields;
 * the callers here use 512-byte stack buffers -- presumably large
 * enough for the fixed-size hdev name fields, but confirm against
 * sizeof(hdev->dev_name) + sizeof(hdev->short_name).
 */
1007 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1008 {
1009 	u16 eir_len = 0;
1010 	size_t name_len;
1011 
1012 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1013 		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1014 					  hdev->dev_class, 3);
1015 
1016 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1017 		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1018 					  hdev->appearance);
1019 
1020 	name_len = strlen(hdev->dev_name);
1021 	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1022 				  hdev->dev_name, name_len);
1023 
1024 	name_len = strlen(hdev->short_name);
1025 	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1026 				  hdev->short_name, name_len);
1027 
1028 	return eir_len;
1029 }
1030 
/* MGMT_OP_READ_EXT_INFO handler: like read_controller_info() but with
 * class of device, appearance and names packed as EIR data after the
 * fixed header. Using this command also switches the socket over from
 * the legacy class/name events to the extended info changed event.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* NOTE(review): 512 bytes is assumed sufficient for the fixed
	 * reply plus the EIR data built by append_eir_data_to_buf();
	 * there is no explicit bound check here — confirm against the
	 * eir helpers and name length limits.
	 */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	BT_DBG("sock %p %s", sk, hdev->name);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1070 
/* Broadcast an Extended Controller Information Changed event carrying
 * the freshly built EIR data to all sockets that opted in via
 * MGMT_OP_READ_EXT_INFO (HCI_MGMT_EXT_INFO_EVENTS), except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1086 
/* Send a successful command-complete response for a settings-style
 * opcode, carrying the controller's current settings bitmask.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1094 
/* Request-complete callback for clean_up_hci_state(): once the HCI
 * commands have finished and no connections remain, run the pending
 * power_off work immediately instead of waiting for its timeout.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1104 
/* Emit an Advertising Added event for @instance to all mgmt sockets,
 * skipping @sk (the socket that issued the triggering command).
 */
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}
1113 
/* Emit an Advertising Removed event for @instance to all mgmt sockets,
 * skipping @sk (the socket that issued the triggering command).
 */
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}
1123 
1124 static void cancel_adv_timeout(struct hci_dev *hdev)
1125 {
1126 	if (hdev->adv_instance_timeout) {
1127 		hdev->adv_instance_timeout = 0;
1128 		cancel_delayed_work(&hdev->adv_instance_expire);
1129 	}
1130 }
1131 
/* Build and run a single HCI request that brings the controller to a
 * quiescent state for power off: disable page/inquiry scan, remove
 * advertising instances, stop advertising and discovery, and abort
 * every active connection. Returns the hci_req_run() result; -ENODATA
 * means nothing needed to be queued.
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1165 
/* MGMT_OP_SET_POWERED handler. Powering on is deferred to the power_on
 * work; powering off first quiesces the controller via
 * clean_up_hci_state() and then schedules the delayed power_off work.
 * The command-complete response is sent later from the respective work
 * handlers through the pending command added here.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested power state: just send the settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1220 
/* Broadcast a New Settings event with the current settings bitmask to
 * all sockets that enabled setting events, except @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1228 
/* Non-static wrapper: broadcast New Settings to every listener. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1233 
/* Shared context for mgmt_pending_foreach() response callbacks */
struct cmd_lookup {
	struct sock *sk;	/* first responding socket (ref held by callback) */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1239 
/* mgmt_pending_foreach() callback: answer a pending settings command,
 * unlink and free it. The first responder's socket is stashed in the
 * cmd_lookup (with a reference held) so the caller can later skip it
 * when broadcasting the resulting New Settings event.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1255 
/* mgmt_pending_foreach() callback: fail a pending command with the
 * mgmt status pointed to by @data and remove it.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1263 
1264 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1265 {
1266 	if (cmd->cmd_complete) {
1267 		u8 *status = data;
1268 
1269 		cmd->cmd_complete(cmd, *status);
1270 		mgmt_pending_remove(cmd);
1271 
1272 		return;
1273 	}
1274 
1275 	cmd_status_rsp(cmd, data);
1276 }
1277 
/* Generic cmd_complete handler: echo the original command parameters
 * back as the response payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1283 
/* cmd_complete handler for commands whose parameters start with a
 * mgmt_addr_info: respond with just that address portion.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1289 
1290 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1291 {
1292 	if (!lmp_bredr_capable(hdev))
1293 		return MGMT_STATUS_NOT_SUPPORTED;
1294 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1295 		return MGMT_STATUS_REJECTED;
1296 	else
1297 		return MGMT_STATUS_SUCCESS;
1298 }
1299 
1300 static u8 mgmt_le_support(struct hci_dev *hdev)
1301 {
1302 	if (!lmp_le_capable(hdev))
1303 		return MGMT_STATUS_NOT_SUPPORTED;
1304 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1305 		return MGMT_STATUS_REJECTED;
1306 	else
1307 		return MGMT_STATUS_SUCCESS;
1308 }
1309 
/* Completion path for Set Discoverable: resolve the pending command
 * with the HCI @status, and on success arm the discoverable timeout
 * (stored earlier by set_discoverable()) and broadcast New Settings.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the limited-discoverable flag set optimistically
		 * by set_discoverable()
		 */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1344 
/* MGMT_OP_SET_DISCOVERABLE handler. val is 0x00 (off), 0x01 (general)
 * or 0x02 (limited, which requires a timeout). When powered, the scan
 * mode change is deferred to the discoverable_update work and the
 * response is sent from mgmt_set_discoverable_complete(); otherwise
 * only the flags are updated and the reply is sent directly.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs the discov_off timer, which only runs while
	 * the controller is powered.
	 */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1474 
/* Completion path for Set Connectable: resolve the pending command with
 * the HCI @status and, on success, send the settings response and
 * broadcast New Settings.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1502 
/* Flag-only variant of Set Connectable used while powered off: update
 * HCI_CONNECTABLE (dropping HCI_DISCOVERABLE when going non-connectable),
 * answer @sk, and on an actual change refresh scan state and broadcast
 * New Settings.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_req_update_scan(hdev);
		hci_update_background_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1531 
/* MGMT_OP_SET_CONNECTABLE handler. While powered off only the flags are
 * touched (set_connectable_update_settings()); while powered the scan
 * mode change is deferred to the connectable_update work, which replies
 * via mgmt_set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Dropping connectable also ends discoverable mode */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1588 
/* MGMT_OP_SET_BONDABLE handler: toggle the HCI_BONDABLE flag. This is
 * purely a host-side setting, so the reply is sent immediately; on a
 * real change New Settings is broadcast and, in limited privacy mode,
 * the advertising may need refreshing via the discoverable_update work.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1631 
/* MGMT_OP_SET_LINK_SECURITY handler (BR/EDR authentication). While
 * powered off only the HCI_LINK_SECURITY flag is toggled; while powered
 * a Write Authentication Enable command is sent and the reply is
 * delivered from the corresponding event handler via the pending cmd.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth mode: reply directly */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1700 
/* MGMT_OP_SET_SSP handler (Secure Simple Pairing). While powered off
 * only the flags are toggled (disabling SSP also clears High Speed);
 * while powered a Write Simple Pairing Mode command is issued and the
 * reply comes from its event handler via the pending command.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* Disabling SSP also disables High Speed; "changed"
			 * must reflect either flag actually flipping.
			 */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Turning SSP off also turns off debug-key mode (cp->val is 0x00
	 * here, which disables it).
	 */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1781 
/* MGMT_OP_SET_HS handler (High Speed / AMP). This is a host-only flag
 * requiring SSP; disabling it is rejected while powered. The reply is
 * sent immediately and New Settings broadcast on an actual change.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A pending Set SSP could clear HS underneath us */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1838 
/* Request-complete callback for Set LE: answer all pending SET_LE
 * commands (failure or settings response), broadcast New Settings, and
 * when LE ended up enabled make sure the controller has sane default
 * advertising/scan response data.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the first responder's socket */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;
		hci_req_init(&req, hdev);
		if (ext_adv_capable(hdev)) {
			int err;

			err = __hci_req_setup_ext_adv_instance(&req, 0x00);
			if (!err)
				__hci_req_update_scan_rsp_data(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
		}
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
1885 
/* MGMT_OP_SET_LE handler. LE-only controllers cannot switch LE off.
 * While powered off (or when the host-LE state already matches) only
 * the flags are updated; otherwise a Write LE Host Supported request is
 * built (disabling advertising first when turning LE off) and the reply
 * is sent from le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Turning LE off removes all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop advertising before disabling LE on the controller */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);

		if (ext_adv_capable(hdev))
			__hci_req_clear_ext_adv_sets(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1992 
1993 /* This is a helper function to test for pending mgmt commands that can
1994  * cause CoD or EIR HCI commands. We can only allow one such pending
1995  * mgmt command at a time since otherwise we cannot easily track what
1996  * the current values are, will be, and based on that calculate if a new
1997  * HCI command needs to be sent and if yes with what value.
1998  */
1999 static bool pending_eir_or_class(struct hci_dev *hdev)
2000 {
2001 	struct mgmt_pending_cmd *cmd;
2002 
2003 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2004 		switch (cmd->opcode) {
2005 		case MGMT_OP_ADD_UUID:
2006 		case MGMT_OP_REMOVE_UUID:
2007 		case MGMT_OP_SET_DEV_CLASS:
2008 		case MGMT_OP_SET_POWERED:
2009 			return true;
2010 		}
2011 	}
2012 
2013 	return false;
2014 }
2015 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; bytes 12-15 carry the shortened UUID value.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2020 
2021 static u8 get_uuid_size(const u8 *uuid)
2022 {
2023 	u32 val;
2024 
2025 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2026 		return 128;
2027 
2028 	val = get_unaligned_le32(&uuid[12]);
2029 	if (val > 0xffff)
2030 		return 32;
2031 
2032 	return 16;
2033 }
2034 
/* Shared completion helper for class/EIR changing commands (Add UUID,
 * Remove UUID, Set Device Class): answer the pending @mgmt_op command
 * with the translated @status and the current class of device.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2053 
/* Request-complete callback for add_uuid(): finish the pending command */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2060 
/* MGMT_OP_ADD_UUID handler: record the UUID in hdev->uuids and refresh
 * class of device and EIR. If no HCI commands were needed (-ENODATA)
 * the reply is sent directly; otherwise it comes from
 * add_uuid_complete() via the pending command.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one class/EIR changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* Nothing to send to the controller: reply right away */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2118 
2119 static bool enable_service_cache(struct hci_dev *hdev)
2120 {
2121 	if (!hdev_is_powered(hdev))
2122 		return false;
2123 
2124 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2125 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2126 				   CACHE_TIMEOUT);
2127 		return true;
2128 	}
2129 
2130 	return false;
2131 }
2132 
/* Request-complete callback for remove_uuid(): finish the pending command */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2139 
/* MGMT_OP_REMOVE_UUID handler: remove one service UUID (or all of them
 * when the all-zero wildcard UUID is given) and refresh the Class of
 * Device and EIR data via an HCI request.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID is a wildcard meaning "remove everything". */
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one EIR/class changing operation may be in flight. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache got (re)armed, the controller
		 * update is deferred and we can answer immediately.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	/* Remove every list entry matching the given UUID. */
	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		/* -ENODATA: no HCI commands queued, answer right away. */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* Defer the mgmt response until remove_uuid_complete runs. */
	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2218 
/* HCI request callback: relay SET_DEV_CLASS completion to the common
 * class-update completion handler.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2225 
/* MGMT_OP_SET_DEV_CLASS handler: update the major/minor device class
 * and, when the adapter is powered, push the new Class of Device to
 * the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Class of Device only exists for BR/EDR capable controllers. */
	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Reject values with the low two minor bits or high three major
	 * bits set; those bit positions are not accepted here.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: just store the values and answer; nothing to
	 * send to the controller now.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* Drop the lock while cancelling: waiting for the
		 * service_cache work with hci_dev_lock held could block
		 * if that work itself needs the lock.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* -ENODATA: no HCI commands queued, answer right away. */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* Defer the mgmt response until set_class_complete runs. */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2296 
/* MGMT_OP_LOAD_LINK_KEYS handler: replace all stored BR/EDR link keys
 * with the list supplied by userspace and update the
 * HCI_KEEP_DEBUG_KEYS policy flag. All input is validated before the
 * existing key store is touched.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps the total payload size within u16. */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload must contain exactly key_count key entries. */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* debug_keys is a boolean in the wire format. */
	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry before clearing the existing keys. */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Notify listeners only if the debug keys setting changed. */
	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Administratively blocked keys are never stored. */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2385 
2386 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2387 			   u8 addr_type, struct sock *skip_sk)
2388 {
2389 	struct mgmt_ev_device_unpaired ev;
2390 
2391 	bacpy(&ev.addr.bdaddr, bdaddr);
2392 	ev.addr.type = addr_type;
2393 
2394 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2395 			  skip_sk);
2396 }
2397 
/* MGMT_OP_UNPAIR_DEVICE handler: delete the stored pairing material
 * for a device (link key for BR/EDR, SMP keys for LE) and optionally
 * terminate a live connection to it. When a link is terminated the
 * mgmt response is deferred via a pending command.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* The reply always echoes the target address. */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	/* disconnect is a boolean in the wire format. */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* No live connection: connection parameters can be
		 * dropped right away.
		 */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Defer the mgmt response until the disconnect completes. */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2525 
/* MGMT_OP_DISCONNECT handler: tear down an existing ACL or LE
 * connection. The mgmt response is deferred until the disconnect
 * completes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always echoes the target address. */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect command may be pending at a time. */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED means there is no established link to drop. */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2591 
2592 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2593 {
2594 	switch (link_type) {
2595 	case LE_LINK:
2596 		switch (addr_type) {
2597 		case ADDR_LE_DEV_PUBLIC:
2598 			return BDADDR_LE_PUBLIC;
2599 
2600 		default:
2601 			/* Fallback to LE Random address type */
2602 			return BDADDR_LE_RANDOM;
2603 		}
2604 
2605 	default:
2606 		/* Fallback to BR/EDR type */
2607 		return BDADDR_BREDR;
2608 	}
2609 }
2610 
/* MGMT_OP_GET_CONNECTIONS handler: report the addresses of all
 * mgmt-visible connections, excluding SCO/eSCO links.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count connections to size the reply buffer. This
	 * may over-allocate slightly since SCO/eSCO links are only
	 * filtered out in the second pass.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in the addresses. A SCO/eSCO entry is
	 * written but then skipped (i not advanced), so the next
	 * accepted connection simply overwrites it.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2664 
/* Send HCI_OP_PIN_CODE_NEG_REPLY for the given address, tracking the
 * operation as a pending MGMT_OP_PIN_CODE_NEG_REPLY command so the
 * eventual HCI completion can answer the mgmt socket.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2685 
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user supplied PIN code to
 * the controller for a pending BR/EDR PIN code request.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a 16 byte PIN; anything shorter is
	 * rejected towards the controller with a negative reply.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2747 
/* MGMT_OP_SET_IO_CAPABILITY handler: store the IO capability to be
 * used for future pairing attempts.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	BT_DBG("");

	/* SMP_IO_KEYBOARD_DISPLAY is the highest defined capability. */
	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
	       hdev->io_capability);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
2771 
2772 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2773 {
2774 	struct hci_dev *hdev = conn->hdev;
2775 	struct mgmt_pending_cmd *cmd;
2776 
2777 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2778 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2779 			continue;
2780 
2781 		if (cmd->user_data != conn)
2782 			continue;
2783 
2784 		return cmd;
2785 	}
2786 
2787 	return NULL;
2788 }
2789 
/* Finalize a PAIR_DEVICE command: send the mgmt reply, detach the
 * pairing callbacks from the connection and release the references
 * taken when the command was set up in pair_device().
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Balances the hci_conn_get() done when cmd->user_data was set. */
	hci_conn_put(conn);

	return err;
}
2818 
/* SMP pairing completion hook: complete the matching pending
 * PAIR_DEVICE command, if any, with success or failure.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
	struct mgmt_pending_cmd *cmd;

	cmd = find_pairing(conn);
	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
2830 
/* hci_conn callback used for BR/EDR pairing: any connect/security/
 * disconnect event completes the pending PAIR_DEVICE command with the
 * translated status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2846 
/* hci_conn callback used for LE pairing: only failures complete the
 * pending command here. Success is reported via mgmt_smp_complete(),
 * since for LE just connecting isn't proof that pairing finished
 * (see pair_device()).
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	/* Ignore success; SMP signals the real pairing outcome. */
	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2865 
2866 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2867 		       u16 len)
2868 {
2869 	struct mgmt_cp_pair_device *cp = data;
2870 	struct mgmt_rp_pair_device rp;
2871 	struct mgmt_pending_cmd *cmd;
2872 	u8 sec_level, auth_type;
2873 	struct hci_conn *conn;
2874 	int err;
2875 
2876 	BT_DBG("");
2877 
2878 	memset(&rp, 0, sizeof(rp));
2879 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2880 	rp.addr.type = cp->addr.type;
2881 
2882 	if (!bdaddr_type_is_valid(cp->addr.type))
2883 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2884 					 MGMT_STATUS_INVALID_PARAMS,
2885 					 &rp, sizeof(rp));
2886 
2887 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2888 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2889 					 MGMT_STATUS_INVALID_PARAMS,
2890 					 &rp, sizeof(rp));
2891 
2892 	hci_dev_lock(hdev);
2893 
2894 	if (!hdev_is_powered(hdev)) {
2895 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2896 					MGMT_STATUS_NOT_POWERED, &rp,
2897 					sizeof(rp));
2898 		goto unlock;
2899 	}
2900 
2901 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2902 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2903 					MGMT_STATUS_ALREADY_PAIRED, &rp,
2904 					sizeof(rp));
2905 		goto unlock;
2906 	}
2907 
2908 	sec_level = BT_SECURITY_MEDIUM;
2909 	auth_type = HCI_AT_DEDICATED_BONDING;
2910 
2911 	if (cp->addr.type == BDADDR_BREDR) {
2912 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2913 				       auth_type);
2914 	} else {
2915 		u8 addr_type = le_addr_type(cp->addr.type);
2916 		struct hci_conn_params *p;
2917 
2918 		/* When pairing a new device, it is expected to remember
2919 		 * this device for future connections. Adding the connection
2920 		 * parameter information ahead of time allows tracking
2921 		 * of the slave preferred values and will speed up any
2922 		 * further connection establishment.
2923 		 *
2924 		 * If connection parameters already exist, then they
2925 		 * will be kept and this function does nothing.
2926 		 */
2927 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2928 
2929 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2930 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
2931 
2932 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
2933 					   addr_type, sec_level,
2934 					   HCI_LE_CONN_TIMEOUT);
2935 	}
2936 
2937 	if (IS_ERR(conn)) {
2938 		int status;
2939 
2940 		if (PTR_ERR(conn) == -EBUSY)
2941 			status = MGMT_STATUS_BUSY;
2942 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
2943 			status = MGMT_STATUS_NOT_SUPPORTED;
2944 		else if (PTR_ERR(conn) == -ECONNREFUSED)
2945 			status = MGMT_STATUS_REJECTED;
2946 		else
2947 			status = MGMT_STATUS_CONNECT_FAILED;
2948 
2949 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2950 					status, &rp, sizeof(rp));
2951 		goto unlock;
2952 	}
2953 
2954 	if (conn->connect_cfm_cb) {
2955 		hci_conn_drop(conn);
2956 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2957 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
2958 		goto unlock;
2959 	}
2960 
2961 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2962 	if (!cmd) {
2963 		err = -ENOMEM;
2964 		hci_conn_drop(conn);
2965 		goto unlock;
2966 	}
2967 
2968 	cmd->cmd_complete = pairing_complete;
2969 
2970 	/* For LE, just connecting isn't a proof that the pairing finished */
2971 	if (cp->addr.type == BDADDR_BREDR) {
2972 		conn->connect_cfm_cb = pairing_complete_cb;
2973 		conn->security_cfm_cb = pairing_complete_cb;
2974 		conn->disconn_cfm_cb = pairing_complete_cb;
2975 	} else {
2976 		conn->connect_cfm_cb = le_pairing_complete_cb;
2977 		conn->security_cfm_cb = le_pairing_complete_cb;
2978 		conn->disconn_cfm_cb = le_pairing_complete_cb;
2979 	}
2980 
2981 	conn->io_capability = cp->io_cap;
2982 	cmd->user_data = hci_conn_get(conn);
2983 
2984 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
2985 	    hci_conn_security(conn, sec_level, auth_type, true)) {
2986 		cmd->cmd_complete(cmd, 0);
2987 		mgmt_pending_remove(cmd);
2988 	}
2989 
2990 	err = 0;
2991 
2992 unlock:
2993 	hci_dev_unlock(hdev);
2994 	return err;
2995 }
2996 
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the pending PAIR_DEVICE
 * command for the given address, completing it with
 * MGMT_STATUS_CANCELLED.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the device the pending command targets. */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3039 
/* Common helper for the PIN/confirmation/passkey (negative) reply
 * commands: validate that the target connection exists, then answer
 * either via SMP (LE pairing) or by sending the given HCI command
 * (BR/EDR pairing).
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE replies are handled entirely by SMP, so the mgmt command
	 * can be answered synchronously.
	 */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3110 
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: reject a PIN code request. */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3122 
/* MGMT_OP_USER_CONFIRM_REPLY handler: accept a user confirmation
 * request.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	/* This command carries no variable-length payload. */
	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3138 
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: reject a user confirmation
 * request.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3150 
/* MGMT_OP_USER_PASSKEY_REPLY handler: supply the passkey for a
 * passkey entry request.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3162 
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: reject a passkey entry
 * request.
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3174 
/* If the currently active advertising instance uses data selected by
 * @flags (e.g. the local name or appearance), cancel its timeout and
 * schedule the next instance so refreshed data gets advertised.
 */
static void adv_expire(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;
	struct hci_request req;
	int err;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return;

	hci_req_init(&req, hdev);
	err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
					      true);
	if (err)
		return;

	hci_req_run(&req, NULL);
}
3203 
/* HCI request callback for set_local_name(): complete the pending
 * mgmt command and, on success, refresh advertising instances that
 * carry the local name.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			        mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3235 
/* MGMT_OP_SET_LOCAL_NAME handler: update the local device name and
 * short name, propagating the change to the controller name, EIR and
 * scan response data as needed.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* Powered off: just store the names and notify listeners; no
	 * HCI traffic is needed now.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		__hci_req_update_name(&req);
		__hci_req_update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3305 
3306 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3307 			  u16 len)
3308 {
3309 	struct mgmt_cp_set_appearance *cp = data;
3310 	u16 appearance;
3311 	int err;
3312 
3313 	BT_DBG("");
3314 
3315 	if (!lmp_le_capable(hdev))
3316 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3317 				       MGMT_STATUS_NOT_SUPPORTED);
3318 
3319 	appearance = le16_to_cpu(cp->appearance);
3320 
3321 	hci_dev_lock(hdev);
3322 
3323 	if (hdev->appearance != appearance) {
3324 		hdev->appearance = appearance;
3325 
3326 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3327 			adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3328 
3329 		ext_info_changed(hdev, sk);
3330 	}
3331 
3332 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3333 				0);
3334 
3335 	hci_dev_unlock(hdev);
3336 
3337 	return err;
3338 }
3339 
3340 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3341 				 void *data, u16 len)
3342 {
3343 	struct mgmt_rp_get_phy_confguration rp;
3344 
3345 	BT_DBG("sock %p %s", sk, hdev->name);
3346 
3347 	hci_dev_lock(hdev);
3348 
3349 	memset(&rp, 0, sizeof(rp));
3350 
3351 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3352 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3353 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3354 
3355 	hci_dev_unlock(hdev);
3356 
3357 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3358 				 &rp, sizeof(rp));
3359 }
3360 
3361 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3362 {
3363 	struct mgmt_ev_phy_configuration_changed ev;
3364 
3365 	memset(&ev, 0, sizeof(ev));
3366 
3367 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3368 
3369 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3370 			  sizeof(ev), skip);
3371 }
3372 
3373 static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
3374 				     u16 opcode, struct sk_buff *skb)
3375 {
3376 	struct mgmt_pending_cmd *cmd;
3377 
3378 	BT_DBG("status 0x%02x", status);
3379 
3380 	hci_dev_lock(hdev);
3381 
3382 	cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
3383 	if (!cmd)
3384 		goto unlock;
3385 
3386 	if (status) {
3387 		mgmt_cmd_status(cmd->sk, hdev->id,
3388 				MGMT_OP_SET_PHY_CONFIGURATION,
3389 				mgmt_status(status));
3390 	} else {
3391 		mgmt_cmd_complete(cmd->sk, hdev->id,
3392 				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
3393 				  NULL, 0);
3394 
3395 		mgmt_phy_configuration_changed(hdev, cmd->sk);
3396 	}
3397 
3398 	mgmt_pending_remove(cmd);
3399 
3400 unlock:
3401 	hci_dev_unlock(hdev);
3402 }
3403 
3404 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3405 				 void *data, u16 len)
3406 {
3407 	struct mgmt_cp_set_phy_confguration *cp = data;
3408 	struct hci_cp_le_set_default_phy cp_phy;
3409 	struct mgmt_pending_cmd *cmd;
3410 	struct hci_request req;
3411 	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3412 	u16 pkt_type = (HCI_DH1 | HCI_DM1);
3413 	bool changed = false;
3414 	int err;
3415 
3416 	BT_DBG("sock %p %s", sk, hdev->name);
3417 
3418 	configurable_phys = get_configurable_phys(hdev);
3419 	supported_phys = get_supported_phys(hdev);
3420 	selected_phys = __le32_to_cpu(cp->selected_phys);
3421 
3422 	if (selected_phys & ~supported_phys)
3423 		return mgmt_cmd_status(sk, hdev->id,
3424 				       MGMT_OP_SET_PHY_CONFIGURATION,
3425 				       MGMT_STATUS_INVALID_PARAMS);
3426 
3427 	unconfigure_phys = supported_phys & ~configurable_phys;
3428 
3429 	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3430 		return mgmt_cmd_status(sk, hdev->id,
3431 				       MGMT_OP_SET_PHY_CONFIGURATION,
3432 				       MGMT_STATUS_INVALID_PARAMS);
3433 
3434 	if (selected_phys == get_selected_phys(hdev))
3435 		return mgmt_cmd_complete(sk, hdev->id,
3436 					 MGMT_OP_SET_PHY_CONFIGURATION,
3437 					 0, NULL, 0);
3438 
3439 	hci_dev_lock(hdev);
3440 
3441 	if (!hdev_is_powered(hdev)) {
3442 		err = mgmt_cmd_status(sk, hdev->id,
3443 				      MGMT_OP_SET_PHY_CONFIGURATION,
3444 				      MGMT_STATUS_REJECTED);
3445 		goto unlock;
3446 	}
3447 
3448 	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3449 		err = mgmt_cmd_status(sk, hdev->id,
3450 				      MGMT_OP_SET_PHY_CONFIGURATION,
3451 				      MGMT_STATUS_BUSY);
3452 		goto unlock;
3453 	}
3454 
3455 	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3456 		pkt_type |= (HCI_DH3 | HCI_DM3);
3457 	else
3458 		pkt_type &= ~(HCI_DH3 | HCI_DM3);
3459 
3460 	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3461 		pkt_type |= (HCI_DH5 | HCI_DM5);
3462 	else
3463 		pkt_type &= ~(HCI_DH5 | HCI_DM5);
3464 
3465 	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3466 		pkt_type &= ~HCI_2DH1;
3467 	else
3468 		pkt_type |= HCI_2DH1;
3469 
3470 	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3471 		pkt_type &= ~HCI_2DH3;
3472 	else
3473 		pkt_type |= HCI_2DH3;
3474 
3475 	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3476 		pkt_type &= ~HCI_2DH5;
3477 	else
3478 		pkt_type |= HCI_2DH5;
3479 
3480 	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3481 		pkt_type &= ~HCI_3DH1;
3482 	else
3483 		pkt_type |= HCI_3DH1;
3484 
3485 	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3486 		pkt_type &= ~HCI_3DH3;
3487 	else
3488 		pkt_type |= HCI_3DH3;
3489 
3490 	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3491 		pkt_type &= ~HCI_3DH5;
3492 	else
3493 		pkt_type |= HCI_3DH5;
3494 
3495 	if (pkt_type != hdev->pkt_type) {
3496 		hdev->pkt_type = pkt_type;
3497 		changed = true;
3498 	}
3499 
3500 	if ((selected_phys & MGMT_PHY_LE_MASK) ==
3501 	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3502 		if (changed)
3503 			mgmt_phy_configuration_changed(hdev, sk);
3504 
3505 		err = mgmt_cmd_complete(sk, hdev->id,
3506 					MGMT_OP_SET_PHY_CONFIGURATION,
3507 					0, NULL, 0);
3508 
3509 		goto unlock;
3510 	}
3511 
3512 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3513 			       len);
3514 	if (!cmd) {
3515 		err = -ENOMEM;
3516 		goto unlock;
3517 	}
3518 
3519 	hci_req_init(&req, hdev);
3520 
3521 	memset(&cp_phy, 0, sizeof(cp_phy));
3522 
3523 	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3524 		cp_phy.all_phys |= 0x01;
3525 
3526 	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3527 		cp_phy.all_phys |= 0x02;
3528 
3529 	if (selected_phys & MGMT_PHY_LE_1M_TX)
3530 		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3531 
3532 	if (selected_phys & MGMT_PHY_LE_2M_TX)
3533 		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3534 
3535 	if (selected_phys & MGMT_PHY_LE_CODED_TX)
3536 		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3537 
3538 	if (selected_phys & MGMT_PHY_LE_1M_RX)
3539 		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3540 
3541 	if (selected_phys & MGMT_PHY_LE_2M_RX)
3542 		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3543 
3544 	if (selected_phys & MGMT_PHY_LE_CODED_RX)
3545 		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3546 
3547 	hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
3548 
3549 	err = hci_req_run_skb(&req, set_default_phy_complete);
3550 	if (err < 0)
3551 		mgmt_pending_remove(cmd);
3552 
3553 unlock:
3554 	hci_dev_unlock(hdev);
3555 
3556 	return err;
3557 }
3558 
3559 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3560 			    u16 len)
3561 {
3562 	int err = MGMT_STATUS_SUCCESS;
3563 	struct mgmt_cp_set_blocked_keys *keys = data;
3564 	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3565 				   sizeof(struct mgmt_blocked_key_info));
3566 	u16 key_count, expected_len;
3567 	int i;
3568 
3569 	BT_DBG("request for %s", hdev->name);
3570 
3571 	key_count = __le16_to_cpu(keys->key_count);
3572 	if (key_count > max_key_count) {
3573 		bt_dev_err(hdev, "too big key_count value %u", key_count);
3574 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3575 				       MGMT_STATUS_INVALID_PARAMS);
3576 	}
3577 
3578 	expected_len = struct_size(keys, keys, key_count);
3579 	if (expected_len != len) {
3580 		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3581 			   expected_len, len);
3582 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3583 				       MGMT_STATUS_INVALID_PARAMS);
3584 	}
3585 
3586 	hci_dev_lock(hdev);
3587 
3588 	hci_blocked_keys_clear(hdev);
3589 
3590 	for (i = 0; i < keys->key_count; ++i) {
3591 		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3592 
3593 		if (!b) {
3594 			err = MGMT_STATUS_NO_RESOURCES;
3595 			break;
3596 		}
3597 
3598 		b->type = keys->keys[i].type;
3599 		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3600 		list_add_rcu(&b->list, &hdev->blocked_keys);
3601 	}
3602 	hci_dev_unlock(hdev);
3603 
3604 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3605 				err, NULL, 0);
3606 }
3607 
/* Set Wideband Speech (MGMT_OP_SET_WIDEBAND_SPEECH) command handler.
 *
 * Toggles the HCI_WIDEBAND_SPEECH_ENABLED device flag. Only available
 * when the driver declares the HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED
 * quirk, and the setting cannot be changed while the controller is
 * powered.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Only boolean values are accepted. */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Flipping the setting while powered is rejected. */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	/* Only emit New Settings when the flag actually flipped. */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3663 
/* Read Security Info (MGMT_OP_READ_SECURITY_INFO) command handler.
 *
 * Builds an EIR-style (type/length/value) encoded list of security
 * capabilities. buf[16] covers the response header plus the flags
 * entry and up to two 16-bit entries appended below.
 */
static int read_security_info(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	char buf[16];
	struct mgmt_rp_read_security_info *rp = (void *)buf;
	u16 sec_len = 0;
	u8 flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 */
	if (hdev->commands[41] & 0x08)
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	/* Entry 0x01: one byte of capability flags. */
	sec_len = eir_append_data(rp->sec, sec_len, 0x01, &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		sec_len = eir_append_le16(rp->sec, sec_len, 0x02,
					  hdev->max_enc_key_size);

	/* Entry 0x03: maximum LE (SMP) encryption key size. */
	sec_len = eir_append_le16(rp->sec, sec_len, 0x03, SMP_MAX_ENC_KEY_SIZE);

	rp->sec_len = cpu_to_le16(sec_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_SECURITY_INFO, 0,
				 rp, sizeof(*rp) + sec_len);
}
3712 
/* Completion callback for the Read Local OOB Data HCI request.
 *
 * Translates either the legacy (P-192 only) or the extended
 * (P-192 + P-256) controller response into the management reply. For
 * the legacy case the reply is shortened so userspace can tell the
 * two variants apart by size.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
				         u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a truncated controller response. */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* No P-256 data available; shrink the reply accordingly. */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		/* Guard against a truncated controller response. */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
3771 
3772 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3773 			       void *data, u16 data_len)
3774 {
3775 	struct mgmt_pending_cmd *cmd;
3776 	struct hci_request req;
3777 	int err;
3778 
3779 	BT_DBG("%s", hdev->name);
3780 
3781 	hci_dev_lock(hdev);
3782 
3783 	if (!hdev_is_powered(hdev)) {
3784 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3785 				      MGMT_STATUS_NOT_POWERED);
3786 		goto unlock;
3787 	}
3788 
3789 	if (!lmp_ssp_capable(hdev)) {
3790 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3791 				      MGMT_STATUS_NOT_SUPPORTED);
3792 		goto unlock;
3793 	}
3794 
3795 	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3796 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3797 				      MGMT_STATUS_BUSY);
3798 		goto unlock;
3799 	}
3800 
3801 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3802 	if (!cmd) {
3803 		err = -ENOMEM;
3804 		goto unlock;
3805 	}
3806 
3807 	hci_req_init(&req, hdev);
3808 
3809 	if (bredr_sc_enabled(hdev))
3810 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
3811 	else
3812 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3813 
3814 	err = hci_req_run_skb(&req, read_local_oob_data_complete);
3815 	if (err < 0)
3816 		mgmt_pending_remove(cmd);
3817 
3818 unlock:
3819 	hci_dev_unlock(hdev);
3820 	return err;
3821 }
3822 
/* Add Remote Out Of Band Data (MGMT_OP_ADD_REMOTE_OOB_DATA) handler.
 *
 * Accepts either the legacy (P-192 only) or the extended
 * (P-192 + P-256) parameter layout, distinguished purely by the
 * command length. Zero-valued hash/randomizer pairs disable the
 * corresponding OOB data set.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	BT_DBG("%s ", hdev->name);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The legacy layout only carries P-192 data, which is
		 * not usable for LE addresses.
		 */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3930 
3931 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3932 				  void *data, u16 len)
3933 {
3934 	struct mgmt_cp_remove_remote_oob_data *cp = data;
3935 	u8 status;
3936 	int err;
3937 
3938 	BT_DBG("%s", hdev->name);
3939 
3940 	if (cp->addr.type != BDADDR_BREDR)
3941 		return mgmt_cmd_complete(sk, hdev->id,
3942 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3943 					 MGMT_STATUS_INVALID_PARAMS,
3944 					 &cp->addr, sizeof(cp->addr));
3945 
3946 	hci_dev_lock(hdev);
3947 
3948 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3949 		hci_remote_oob_data_clear(hdev);
3950 		status = MGMT_STATUS_SUCCESS;
3951 		goto done;
3952 	}
3953 
3954 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3955 	if (err < 0)
3956 		status = MGMT_STATUS_INVALID_PARAMS;
3957 	else
3958 		status = MGMT_STATUS_SUCCESS;
3959 
3960 done:
3961 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3962 				status, &cp->addr, sizeof(cp->addr));
3963 
3964 	hci_dev_unlock(hdev);
3965 	return err;
3966 }
3967 
3968 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
3969 {
3970 	struct mgmt_pending_cmd *cmd;
3971 
3972 	BT_DBG("status %d", status);
3973 
3974 	hci_dev_lock(hdev);
3975 
3976 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
3977 	if (!cmd)
3978 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3979 
3980 	if (!cmd)
3981 		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
3982 
3983 	if (cmd) {
3984 		cmd->cmd_complete(cmd, mgmt_status(status));
3985 		mgmt_pending_remove(cmd);
3986 	}
3987 
3988 	hci_dev_unlock(hdev);
3989 
3990 	/* Handle suspend notifier */
3991 	if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
3992 			       hdev->suspend_tasks)) {
3993 		bt_dev_dbg(hdev, "Unpaused discovery");
3994 		wake_up(&hdev->suspend_wait_q);
3995 	}
3996 }
3997 
3998 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
3999 				    uint8_t *mgmt_status)
4000 {
4001 	switch (type) {
4002 	case DISCOV_TYPE_LE:
4003 		*mgmt_status = mgmt_le_support(hdev);
4004 		if (*mgmt_status)
4005 			return false;
4006 		break;
4007 	case DISCOV_TYPE_INTERLEAVED:
4008 		*mgmt_status = mgmt_le_support(hdev);
4009 		if (*mgmt_status)
4010 			return false;
4011 		/* Intentional fall-through */
4012 	case DISCOV_TYPE_BREDR:
4013 		*mgmt_status = mgmt_bredr_support(hdev);
4014 		if (*mgmt_status)
4015 			return false;
4016 		break;
4017 	default:
4018 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
4019 		return false;
4020 	}
4021 
4022 	return true;
4023 }
4024 
/* Common implementation for Start Discovery and Start Limited
 * Discovery. Validates the request, records the discovery parameters
 * and queues the actual start-up on the request workqueue; the
 * command then completes asynchronously from
 * mgmt_start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time, and not while the
	 * controller performs periodic inquiry on its own.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4092 
/* Start Discovery (MGMT_OP_START_DISCOVERY) command handler. */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
4099 
/* Start Limited Discovery (MGMT_OP_START_LIMITED_DISCOVERY) handler. */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
4107 
/* Completion helper for Start Service Discovery: the reply carries
 * only the first byte of the stored command parameters (the discovery
 * type field at the start of the command structure).
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}
4114 
/* Start Service Discovery (MGMT_OP_START_SERVICE_DISCOVERY) handler.
 *
 * Like Start Discovery, but additionally installs a result filter
 * (RSSI threshold and an optional list of 128-bit UUIDs) that is
 * applied to found devices before they are reported.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Cap uuid_count so the expected_len computation below cannot
	 * overflow u16.
	 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time, and not while the
	 * controller performs periodic inquiry on its own.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The command must carry exactly uuid_count 16-byte UUIDs. */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4215 
4216 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
4217 {
4218 	struct mgmt_pending_cmd *cmd;
4219 
4220 	BT_DBG("status %d", status);
4221 
4222 	hci_dev_lock(hdev);
4223 
4224 	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4225 	if (cmd) {
4226 		cmd->cmd_complete(cmd, mgmt_status(status));
4227 		mgmt_pending_remove(cmd);
4228 	}
4229 
4230 	hci_dev_unlock(hdev);
4231 
4232 	/* Handle suspend notifier */
4233 	if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
4234 		bt_dev_dbg(hdev, "Paused discovery");
4235 		wake_up(&hdev->suspend_wait_q);
4236 	}
4237 }
4238 
4239 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4240 			  u16 len)
4241 {
4242 	struct mgmt_cp_stop_discovery *mgmt_cp = data;
4243 	struct mgmt_pending_cmd *cmd;
4244 	int err;
4245 
4246 	BT_DBG("%s", hdev->name);
4247 
4248 	hci_dev_lock(hdev);
4249 
4250 	if (!hci_discovery_active(hdev)) {
4251 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4252 					MGMT_STATUS_REJECTED, &mgmt_cp->type,
4253 					sizeof(mgmt_cp->type));
4254 		goto unlock;
4255 	}
4256 
4257 	if (hdev->discovery.type != mgmt_cp->type) {
4258 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4259 					MGMT_STATUS_INVALID_PARAMS,
4260 					&mgmt_cp->type, sizeof(mgmt_cp->type));
4261 		goto unlock;
4262 	}
4263 
4264 	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4265 	if (!cmd) {
4266 		err = -ENOMEM;
4267 		goto unlock;
4268 	}
4269 
4270 	cmd->cmd_complete = generic_cmd_complete;
4271 
4272 	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4273 	queue_work(hdev->req_workqueue, &hdev->discov_update);
4274 	err = 0;
4275 
4276 unlock:
4277 	hci_dev_unlock(hdev);
4278 	return err;
4279 }
4280 
4281 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4282 			u16 len)
4283 {
4284 	struct mgmt_cp_confirm_name *cp = data;
4285 	struct inquiry_entry *e;
4286 	int err;
4287 
4288 	BT_DBG("%s", hdev->name);
4289 
4290 	hci_dev_lock(hdev);
4291 
4292 	if (!hci_discovery_active(hdev)) {
4293 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4294 					MGMT_STATUS_FAILED, &cp->addr,
4295 					sizeof(cp->addr));
4296 		goto failed;
4297 	}
4298 
4299 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4300 	if (!e) {
4301 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4302 					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4303 					sizeof(cp->addr));
4304 		goto failed;
4305 	}
4306 
4307 	if (cp->name_known) {
4308 		e->name_state = NAME_KNOWN;
4309 		list_del(&e->list);
4310 	} else {
4311 		e->name_state = NAME_NEEDED;
4312 		hci_inquiry_cache_update_resolve(hdev, e);
4313 	}
4314 
4315 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4316 				&cp->addr, sizeof(cp->addr));
4317 
4318 failed:
4319 	hci_dev_unlock(hdev);
4320 	return err;
4321 }
4322 
/* Handler for the Block Device management command.
 *
 * Adds the given address to the controller's blacklist.  On success a
 * Device Blocked event is sent to the other management sockets (the
 * originating socket @sk is skipped).  The command reply always echoes
 * the address back, with the status indicating the outcome.
 */
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	/* Negative return means the entry could not be added (e.g. it is
	 * already present or allocation failed) — TODO(review): confirm
	 * exact failure modes against hci_bdaddr_list_add().
	 */
	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		status = MGMT_STATUS_FAILED;
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
4358 
/* Handler for the Unblock Device management command.
 *
 * Removes the given address from the controller's blacklist.  A failed
 * removal is reported as Invalid Params (the address was presumably
 * never blocked).  On success a Device Unblocked event is sent to the
 * other management sockets, skipping the originating socket @sk.
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
4394 
/* Handler for the Set Device ID management command.
 *
 * Stores the Device ID record (source, vendor, product, version) in
 * hdev and refreshes the extended inquiry response so the new values
 * become visible.  Only source values 0x0000-0x0002 are accepted
 * (per the mgmt API; larger values are rejected as Invalid Params).
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	int err;
	__u16 source;

	BT_DBG("%s", hdev->name);

	source = __le16_to_cpu(cp->source);

	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* Push the updated Device ID into the EIR data; the request
	 * completion is not waited for (NULL callback).
	 */
	hci_req_init(&req, hdev);
	__hci_req_update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}
4429 
/* Completion callback for the HCI request issued by
 * set_advertising_complete() when re-scheduling instance advertising.
 * Nothing to do besides logging the status.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	BT_DBG("status %d", status);
}
4435 
/* HCI request completion callback for the Set Advertising command.
 *
 * On failure, all pending Set Advertising commands are answered with
 * the translated HCI error.  On success, the HCI_ADVERTISING setting
 * is synced with the actual controller state (HCI_LE_ADV), pending
 * commands receive the updated settings and a New Settings event is
 * emitted.  Suspend/resume waiters are woken if this request paused or
 * unpaused advertising.  Finally, if Set Advertising was just disabled
 * while advertising instances exist, multi-instance advertising is
 * re-scheduled.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the real LE advertising state into the setting flag */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused advertising");
		wake_up(&hdev->suspend_wait_q);
	} else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
				      hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused advertising");
		wake_up(&hdev->suspend_wait_q);
	}

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	/* No current instance selected: fall back to the first one */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
4509 
/* Handler for the Set Advertising management command.
 *
 * cp->val may be 0x00 (disable), 0x01 (enable) or 0x02 (enable as
 * connectable).  When no HCI communication is needed — controller
 * powered off, no effective change, LE connections present, or an
 * active LE scan running — only the setting flags are toggled and the
 * reply is sent immediately.  Otherwise an HCI request is built and
 * the command completes asynchronously in set_advertising_complete().
 * The command is rejected with Busy while advertising is paused (e.g.
 * during suspend) or while another Set Advertising/Set LE is pending.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	/* 0x02 still means "enabled" for the on/off comparison below */
	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings if a flag actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	} else {
		__hci_req_disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4621 
/* Handler for the Set Static Address management command.
 *
 * Stores a static random address for LE use.  Only permitted while
 * the controller is powered off; the all-zeroes address (BDADDR_ANY)
 * clears the static address, the all-ones address (BDADDR_NONE) is
 * rejected, and any other address must have its two most significant
 * bits set as required for static random addresses.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Changing the identity address while powered is not allowed */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4665 
/* Handler for the Set Scan Parameters management command.
 *
 * Stores new LE scan interval and window values.  Both must lie in
 * the range 0x0004-0x4000 and the window must not exceed the
 * interval.  If passive background scanning is currently running (LE
 * scan active but no discovery in progress), the scan is restarted so
 * the new parameters take effect immediately.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The scan window is the active portion of each interval and
	 * therefore cannot be larger than the interval itself.
	 */
	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
4722 
/* HCI request completion callback for Set Fast Connectable.
 *
 * Answers the pending command: on HCI failure with the translated
 * error status, otherwise by updating the HCI_FAST_CONNECTABLE flag
 * from the original request parameter and sending the new settings
 * to the requester plus a New Settings event to everyone else.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already be gone (e.g. socket closed) */
	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			        mgmt_status(status));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4756 
/* Handler for the Set Fast Connectable management command.
 *
 * Requires BR/EDR to be enabled and a controller of at least
 * Bluetooth 1.2 (needed for the page scan settings the request
 * writes).  If the controller is powered the page scan parameters are
 * updated via an HCI request completed in fast_connectable_complete();
 * otherwise only the HCI_FAST_CONNECTABLE flag is toggled and the
 * reply is sent immediately.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Fast Connectable may be in flight at a time */
	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No change requested: just confirm the current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* Powered off: toggle the flag only, no HCI traffic possible */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4821 
/* HCI request completion callback for the Set BR/EDR command.
 *
 * On HCI failure the HCI_BREDR_ENABLED flag — which set_bredr()
 * flipped optimistically before issuing the request — is cleared
 * again and the pending command is answered with the translated
 * error.  On success the new settings are sent back and broadcast.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4853 
/* Handler for the Set BR/EDR management command (dual-mode
 * controllers only — both BR/EDR and LE must be supported, and LE
 * must currently be enabled).
 *
 * Powered off: the flag is simply toggled; disabling additionally
 * clears all BR/EDR-only settings.  Powered on: disabling is
 * rejected, and re-enabling is rejected when a static address is in
 * use or Secure Connections is enabled (see the comment below).
 * Otherwise the flag is flipped up front and an HCI request updates
 * page scan and advertising data, completing in set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just confirm the current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all settings that only
		 * make sense for BR/EDR operation.
		 */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, false);
	__hci_req_update_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4965 
/* HCI request completion callback for the Set Secure Connections
 * command.
 *
 * On success the HCI_SC_ENABLED/HCI_SC_ONLY flags are updated from
 * the value in the pending command (0x00 off, 0x01 on, 0x02 SC-only)
 * before replying and broadcasting a New Settings event.  On HCI
 * failure only an error status is returned to the requester.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
			        mgmt_status(status));
		goto remove;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
5010 
/* Handler for the Set Secure Connections management command.
 *
 * cp->val may be 0x00 (disable), 0x01 (enable) or 0x02 (SC only
 * mode).  When the controller is powered off, lacks SC support, or
 * has BR/EDR disabled, only the host-side flags are toggled and the
 * reply is sent immediately (LE SC is a host feature).  Otherwise
 * the controller's SC support is written via HCI and the command
 * completes asynchronously in sc_enable_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* SC requires either controller support or LE being enabled */
	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* For BR/EDR operation, SC builds on top of SSP */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* No change in either flag: just confirm the current settings */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
5098 
/* Handler for the Set Debug Keys management command.
 *
 * cp->val 0x00 disables keeping debug keys, 0x01 keeps generated
 * debug keys, and 0x02 additionally puts the controller into SSP
 * debug mode.  If the controller is powered, SSP is enabled and the
 * debug-mode usage changed, the new mode is written via
 * HCI_OP_WRITE_SSP_DEBUG_MODE (fire-and-forget, result not checked).
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* 0x01 and 0x02 both mean "keep debug keys" */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Only 0x02 turns on active use of the debug keys */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	/* Only broadcast when the visible setting actually changed */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5145 
/* Handler for the Set Privacy management command.
 *
 * cp->privacy 0x00 disables privacy, 0x01 enables it and 0x02
 * enables limited privacy.  Enabling installs the supplied local IRK
 * and marks the current RPA as expired so a fresh one gets
 * generated; disabling wipes the IRK.  Only allowed while the
 * controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a new RPA based on the new IRK */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5202 
5203 static bool irk_is_valid(struct mgmt_irk_info *irk)
5204 {
5205 	switch (irk->addr.type) {
5206 	case BDADDR_LE_PUBLIC:
5207 		return true;
5208 
5209 	case BDADDR_LE_RANDOM:
5210 		/* Two most significant bits shall be set */
5211 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5212 			return false;
5213 		return true;
5214 	}
5215 
5216 	return false;
5217 }
5218 
/* Handler for the Load IRKs management command.
 *
 * Validates the command length against the declared IRK count and
 * each entry's address, then atomically replaces the whole SMP IRK
 * store: existing IRKs are cleared and the new list is added, with
 * entries matching the blocked-key list skipped.  Finally the
 * HCI_RPA_RESOLVING flag is set since user space evidently supports
 * IRK handling.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound on entries that can fit in a u16-sized payload,
	 * used to reject bogus counts before the length check below.
	 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared count exactly */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Validate every entry before touching the existing IRK store */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		/* Administratively blocked keys are skipped, not failed */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5289 
5290 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5291 {
5292 	if (key->master != 0x00 && key->master != 0x01)
5293 		return false;
5294 
5295 	switch (key->addr.type) {
5296 	case BDADDR_LE_PUBLIC:
5297 		return true;
5298 
5299 	case BDADDR_LE_RANDOM:
5300 		/* Two most significant bits shall be set */
5301 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5302 			return false;
5303 		return true;
5304 	}
5305 
5306 	return false;
5307 }
5308 
/* Handler for the Load Long Term Keys management command.
 *
 * Validates the command length against the declared key count and
 * every entry via ltk_is_valid(), then replaces the whole SMP LTK
 * store.  Blocked keys and keys of unrecognized type are skipped.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound on entries that can fit in a u16-sized payload */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Validate every entry before touching the existing LTK store */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		/* Administratively blocked keys are skipped, not failed */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* NOTE: falling through into the default branch
			 * means these assignments are discarded and debug
			 * keys are skipped like unknown types.
			 */
			/* fall through */
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5404 
/* Complete a pending Get Connection Info command.
 *
 * Builds the reply from the connection's cached RSSI/TX power values
 * on success, or with the documented "invalid" sentinel values on
 * failure, then drops the connection reference the pending command
 * was holding (via cmd->user_data).
 */
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	int err;

	/* The original command parameters start with the address */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));

	/* Release the reference taken when the command was queued */
	hci_conn_drop(conn);
	hci_conn_put(conn);

	return err;
}
5431 
/* Request-completion callback for the Read RSSI / Read TX Power request
 * built by get_conn_info(). Resolves the connection handle from the last
 * sent HCI command and completes the matching pending mgmt command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	/* Neither command matched the last sent one - nothing to complete */
	if (!cp) {
		bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
			   handle);
		goto unlock;
	}

	/* Match the pending command by the connection it was issued for */
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	/* cmd_complete (conn_info_cmd_complete) sends the reply and drops
	 * the connection references taken by get_conn_info().
	 */
	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5485 
/* Handle the Get Connection Info management command.
 *
 * Replies immediately with the cached RSSI/TX power values when the cache
 * is still fresh; otherwise queues an HCI request to refresh the values
 * and defers the reply to conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* Pre-fill the reply with the requested address; error replies
	 * echo it back to the caller.
	 */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one Get Connection Info may be pending per connection */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		/* Read RSSI is always queued first; see the comment in
		 * conn_info_refresh_complete() for why the order matters.
		 */
		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive until the completion callback
		 * replies; released in conn_info_cmd_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5606 
5607 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5608 {
5609 	struct hci_conn *conn = cmd->user_data;
5610 	struct mgmt_rp_get_clock_info rp;
5611 	struct hci_dev *hdev;
5612 	int err;
5613 
5614 	memset(&rp, 0, sizeof(rp));
5615 	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5616 
5617 	if (status)
5618 		goto complete;
5619 
5620 	hdev = hci_dev_get(cmd->index);
5621 	if (hdev) {
5622 		rp.local_clock = cpu_to_le32(hdev->clock);
5623 		hci_dev_put(hdev);
5624 	}
5625 
5626 	if (conn) {
5627 		rp.piconet_clock = cpu_to_le32(conn->clock);
5628 		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5629 	}
5630 
5631 complete:
5632 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5633 				sizeof(rp));
5634 
5635 	if (conn) {
5636 		hci_conn_drop(conn);
5637 		hci_conn_put(conn);
5638 	}
5639 
5640 	return err;
5641 }
5642 
5643 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5644 {
5645 	struct hci_cp_read_clock *hci_cp;
5646 	struct mgmt_pending_cmd *cmd;
5647 	struct hci_conn *conn;
5648 
5649 	BT_DBG("%s status %u", hdev->name, status);
5650 
5651 	hci_dev_lock(hdev);
5652 
5653 	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
5654 	if (!hci_cp)
5655 		goto unlock;
5656 
5657 	if (hci_cp->which) {
5658 		u16 handle = __le16_to_cpu(hci_cp->handle);
5659 		conn = hci_conn_hash_lookup_handle(hdev, handle);
5660 	} else {
5661 		conn = NULL;
5662 	}
5663 
5664 	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5665 	if (!cmd)
5666 		goto unlock;
5667 
5668 	cmd->cmd_complete(cmd, mgmt_status(status));
5669 	mgmt_pending_remove(cmd);
5670 
5671 unlock:
5672 	hci_dev_unlock(hdev);
5673 }
5674 
/* Handle the Get Clock Info management command.
 *
 * Queues a Read Clock request for the local clock and, when a BR/EDR
 * address of an existing connection is given, a second one for that
 * connection's piconet clock. The reply is deferred to
 * get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	/* Pre-fill the reply with the requested address; error replies
	 * echo it back to the caller.
	 */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only available for BR/EDR addresses */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-BDADDR_ANY address selects the piconet clock of that
	 * connection in addition to the local clock.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* which = 0x00 (implied by memset): local clock */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* References are dropped in clock_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	/* NOTE(review): if hci_req_run() fails after the hold/get above,
	 * mgmt_pending_remove() frees the command without dropping the
	 * connection references - presumably acceptable because a command
	 * was always queued so the run cannot fail here; confirm against
	 * hci_req_run() semantics.
	 */
	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5750 
5751 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5752 {
5753 	struct hci_conn *conn;
5754 
5755 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5756 	if (!conn)
5757 		return false;
5758 
5759 	if (conn->dst_type != type)
5760 		return false;
5761 
5762 	if (conn->state != BT_CONNECTED)
5763 		return false;
5764 
5765 	return true;
5766 }
5767 
/* Set (or update) the auto-connect policy for an LE device and move its
 * conn_params entry onto the matching action list (pend_le_conns or
 * pend_le_reports). Creates the entry if it does not exist yet.
 *
 * This function requires the caller holds hdev->lock.
 *
 * Returns 0 on success and -EIO if no conn_params entry could be
 * allocated for the address.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do when the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Take the entry off whichever action list it is currently on */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* A pending explicit connect takes precedence over
		 * passive reporting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue for connection when not already connected */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
5812 
5813 static void device_added(struct sock *sk, struct hci_dev *hdev,
5814 			 bdaddr_t *bdaddr, u8 type, u8 action)
5815 {
5816 	struct mgmt_ev_device_added ev;
5817 
5818 	bacpy(&ev.addr.bdaddr, bdaddr);
5819 	ev.addr.type = type;
5820 	ev.action = action;
5821 
5822 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5823 }
5824 
/* Handle the Add Device management command.
 *
 * For BR/EDR addresses the device is added to the controller whitelist
 * (only action 0x01, allow incoming connections, is supported). For LE
 * addresses the action is mapped to an auto-connect policy (0x00 ->
 * report, 0x01 -> direct, 0x02 -> always) and stored in the connection
 * parameters.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		/* Whitelist changed - page scan settings may need updating */
		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	}

	hci_update_background_scan(hdev);

added:
	/* Notify other mgmt sockets about the new device */
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5912 
5913 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5914 			   bdaddr_t *bdaddr, u8 type)
5915 {
5916 	struct mgmt_ev_device_removed ev;
5917 
5918 	bacpy(&ev.addr.bdaddr, bdaddr);
5919 	ev.addr.type = type;
5920 
5921 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5922 }
5923 
/* Handle the Remove Device management command.
 *
 * A specific address removes that device from the whitelist (BR/EDR) or
 * deletes its connection parameters (LE). BDADDR_ANY with address type 0
 * wipes the whole whitelist and all removable LE connection parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			/* Whitelist changed - scan settings may need update */
			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* DISABLED and EXPLICIT entries were not added via Add
		 * Device, so they cannot be removed through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY is only valid with address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Entries with a pending explicit connect are kept
			 * but downgraded so the connect can still finish.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
6052 
/* Handle the Load Connection Parameters management command.
 *
 * Validates the parameter list length, drops all currently disabled
 * connection parameter entries, and stores each valid entry. Invalid
 * individual entries are logged and skipped rather than failing the
 * whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound so the expected length below cannot overflow u16 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command payload must exactly match the declared count */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	/* Start from a clean slate of disabled entries before loading */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
6137 
/* Handle the Set External Configuration management command.
 *
 * Toggles the HCI_EXT_CONFIGURED flag and, when this changes whether the
 * controller counts as configured, transitions the controller between the
 * configured and unconfigured index lists.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* changed is true only when the flag actually flipped */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* When the UNCONFIGURED flag no longer matches the actual
	 * configuration state, move the controller to the other index
	 * list (unconfigured <-> configured).
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6193 
/* Handle the Set Public Address management command.
 *
 * Stores the configured public address for controllers that provide a
 * set_bdaddr driver callback and, once the controller becomes fully
 * configured, moves it from the unconfigured to the configured index
 * list and schedules power-on.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY is not a valid public address */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only controllers whose driver can program the address support this */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6245 
/* HCI completion callback for the Read Local OOB (Extended) Data request
 * queued by read_local_ssp_oob_req().
 *
 * Builds the EIR-encoded OOB reply (Class of Device plus the P-192
 * and/or P-256 hash/randomizer pairs returned by the controller),
 * completes the pending mgmt command and emits a Local OOB Data Updated
 * event to interested sockets.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy command: only P-192 hash/randomizer available */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* 5 bytes CoD EIR + two 18-byte (2 hdr + 16 data)
			 * hash/randomizer fields
			 */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended command: P-256 always, P-192 unless SC-only mode */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* On error reply with an empty EIR (eir_len is 0 here) */
	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Requester implicitly subscribes to future OOB data updates */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
6356 
6357 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
6358 				  struct mgmt_cp_read_local_oob_ext_data *cp)
6359 {
6360 	struct mgmt_pending_cmd *cmd;
6361 	struct hci_request req;
6362 	int err;
6363 
6364 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
6365 			       cp, sizeof(*cp));
6366 	if (!cmd)
6367 		return -ENOMEM;
6368 
6369 	hci_req_init(&req, hdev);
6370 
6371 	if (bredr_sc_enabled(hdev))
6372 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
6373 	else
6374 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
6375 
6376 	err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
6377 	if (err < 0) {
6378 		mgmt_pending_remove(cmd);
6379 		return err;
6380 	}
6381 
6382 	return 0;
6383 }
6384 
/* Handle the Read Local OOB Extended Data management command.
 *
 * For BR/EDR (with SSP enabled) the reply is deferred to an HCI request
 * via read_local_ssp_oob_req(); for LE the EIR payload (address, role,
 * optional SC confirm/random values and flags) is assembled synchronously
 * from local state.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	BT_DBG("%s", hdev->name);

	/* First pass: determine the status and the maximum EIR length so
	 * the reply buffer can be sized up front.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: fill the EIR data, tracking the actual length */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Reply is deferred to the HCI request completion */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* addr[6] encodes the address type: 0x01 random (static),
		 * 0x00 public.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* 0x02 = peripheral (advertising), 0x01 = central */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* Requester implicitly subscribes to future OOB data updates */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
6540 
6541 static u32 get_supported_adv_flags(struct hci_dev *hdev)
6542 {
6543 	u32 flags = 0;
6544 
6545 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
6546 	flags |= MGMT_ADV_FLAG_DISCOV;
6547 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
6548 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
6549 	flags |= MGMT_ADV_FLAG_APPEARANCE;
6550 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
6551 
6552 	/* In extended adv TX_POWER returned from Set Adv Param
6553 	 * will be always valid.
6554 	 */
6555 	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
6556 	    ext_adv_capable(hdev))
6557 		flags |= MGMT_ADV_FLAG_TX_POWER;
6558 
6559 	if (ext_adv_capable(hdev)) {
6560 		flags |= MGMT_ADV_FLAG_SEC_1M;
6561 
6562 		if (hdev->le_features[1] & HCI_LE_PHY_2M)
6563 			flags |= MGMT_ADV_FLAG_SEC_2M;
6564 
6565 		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
6566 			flags |= MGMT_ADV_FLAG_SEC_CODED;
6567 	}
6568 
6569 	return flags;
6570 }
6571 
/* Handler for the MGMT_OP_READ_ADV_FEATURES command.
 *
 * Replies with the supported advertising flags, the maximum advertising
 * and scan response data lengths, and the identifiers of all currently
 * registered advertising instances.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	BT_DBG("%s", hdev->name);

	/* Advertising is an LE-only feature. */
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One trailing byte per registered instance for the id list. */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = HCI_MAX_ADV_INSTANCES;
	rp->num_instances = hdev->adv_instance_cnt;

	/* Copy the identifier of every registered instance into the
	 * variable-length tail of the reply.
	 */
	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		*instance = adv_instance->instance;
		instance++;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
6620 
/* Number of bytes the local name occupies when encoded as an EIR field
 * by append_local_name() (returns the resulting offset from 0).
 */
static u8 calculate_name_len(struct hci_dev *hdev)
{
	/* Scratch buffer for the encoded name field. NOTE(review): the
	 * "+ 3" sizing must match the worst case append_local_name() can
	 * write (length byte, type byte, name bytes) — confirm against
	 * that helper's implementation.
	 */
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];

	return append_local_name(hdev, buf, 0);
}
6627 
6628 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
6629 			   bool is_adv_data)
6630 {
6631 	u8 max_len = HCI_MAX_AD_LENGTH;
6632 
6633 	if (is_adv_data) {
6634 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
6635 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
6636 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
6637 			max_len -= 3;
6638 
6639 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
6640 			max_len -= 3;
6641 	} else {
6642 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
6643 			max_len -= calculate_name_len(hdev);
6644 
6645 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
6646 			max_len -= 4;
6647 	}
6648 
6649 	return max_len;
6650 }
6651 
6652 static bool flags_managed(u32 adv_flags)
6653 {
6654 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
6655 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
6656 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
6657 }
6658 
6659 static bool tx_power_managed(u32 adv_flags)
6660 {
6661 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
6662 }
6663 
6664 static bool name_managed(u32 adv_flags)
6665 {
6666 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
6667 }
6668 
6669 static bool appearance_managed(u32 adv_flags)
6670 {
6671 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
6672 }
6673 
6674 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
6675 			      u8 len, bool is_adv_data)
6676 {
6677 	int i, cur_len;
6678 	u8 max_len;
6679 
6680 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
6681 
6682 	if (len > max_len)
6683 		return false;
6684 
6685 	/* Make sure that the data is correctly formatted. */
6686 	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
6687 		cur_len = data[i];
6688 
6689 		if (data[i + 1] == EIR_FLAGS &&
6690 		    (!is_adv_data || flags_managed(adv_flags)))
6691 			return false;
6692 
6693 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
6694 			return false;
6695 
6696 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
6697 			return false;
6698 
6699 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
6700 			return false;
6701 
6702 		if (data[i + 1] == EIR_APPEARANCE &&
6703 		    appearance_managed(adv_flags))
6704 			return false;
6705 
6706 		/* If the current field length would exceed the total data
6707 		 * length, then it's invalid.
6708 		 */
6709 		if (i + cur_len >= len)
6710 			return false;
6711 	}
6712 
6713 	return true;
6714 }
6715 
/* Completion callback for the HCI request started by add_advertising().
 *
 * On success, clears the pending marker on the newly added instances.
 * On failure, removes every still-pending instance (cancelling the adv
 * timeout if the current instance is among them) and notifies monitors.
 * Finally, answers the pending Add Advertising mgmt command, if any.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	u8 instance;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);

	/* Walk with the _safe iterator because failed instances are
	 * removed from the list while iterating.
	 */
	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		if (!status) {
			adv_instance->pending = false;
			continue;
		}

		instance = adv_instance->instance;

		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	}

	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	/* Report the HCI status back to the command originator. */
	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6767 
/* Handler for the MGMT_OP_ADD_ADVERTISING command.
 *
 * Validates the requested instance, flags and TLV payload, registers
 * the instance, and — when HCI traffic is actually needed — schedules
 * it through an HCI request completed by add_advertising_complete().
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u32 supported_flags, phy_flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance identifiers are 1..HCI_MAX_ADV_INSTANCES. */
	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The command length must exactly cover the fixed header plus
	 * the declared adv data and scan response payloads.
	 */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	/* The current implementation only supports a subset of the specified
	 * flags. Also need to check mutual exclusiveness of sec flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
	/* phy_flags ^ (phy_flags & -phy_flags) is non-zero when more
	 * than one secondary PHY bit is set — only one is allowed.
	 */
	if (flags & ~supported_flags ||
	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout can only be honoured while the controller is up. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* Refuse while another command that touches advertising state
	 * is still in flight.
	 */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);

	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6913 
/* Completion callback for the HCI request started by remove_advertising().
 * Always answers the pending mgmt command with success, because the
 * instance itself has already been removed by that point.
 */
static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_remove_advertising *cp;
	struct mgmt_rp_remove_advertising rp;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* A failure status here only means that we failed to disable
	 * advertising. Otherwise, the advertising instance has been removed,
	 * so report success.
	 */
	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
			  &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6943 
/* Handler for the MGMT_OP_REMOVE_ADVERTISING command.
 *
 * Removes one instance (cp->instance != 0) or all instances
 * (cp->instance == 0) and, when required, issues the HCI commands to
 * stop advertising via remove_advertising_complete().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* A non-zero instance must name an existing instance. */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Refuse while another command that touches advertising state
	 * is still in flight.
	 */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to remove if no instances are registered. */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);

	/* If the last instance just went away, stop advertising too. */
	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
	 * flag is set or the device isn't powered then we have no HCI
	 * communication to make. Simply return.
	 */
	if (skb_queue_empty(&req.cmd_q) ||
	    !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		hci_req_purge(&req);
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
7016 
7017 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
7018 			     void *data, u16 data_len)
7019 {
7020 	struct mgmt_cp_get_adv_size_info *cp = data;
7021 	struct mgmt_rp_get_adv_size_info rp;
7022 	u32 flags, supported_flags;
7023 	int err;
7024 
7025 	BT_DBG("%s", hdev->name);
7026 
7027 	if (!lmp_le_capable(hdev))
7028 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7029 				       MGMT_STATUS_REJECTED);
7030 
7031 	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
7032 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7033 				       MGMT_STATUS_INVALID_PARAMS);
7034 
7035 	flags = __le32_to_cpu(cp->flags);
7036 
7037 	/* The current implementation only supports a subset of the specified
7038 	 * flags.
7039 	 */
7040 	supported_flags = get_supported_adv_flags(hdev);
7041 	if (flags & ~supported_flags)
7042 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7043 				       MGMT_STATUS_INVALID_PARAMS);
7044 
7045 	rp.instance = cp->instance;
7046 	rp.flags = cp->flags;
7047 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
7048 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
7049 
7050 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7051 				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7052 
7053 	return err;
7054 }
7055 
/* Dispatch table for mgmt commands, indexed by opcode. Each entry holds
 * the handler, the expected (minimum, for HCI_MGMT_VAR_LEN) parameter
 * size, and flags describing whether the command needs a controller
 * index (HCI_MGMT_NO_HDEV), may run on unconfigured controllers
 * (HCI_MGMT_UNCONFIGURED), accepts variable-length parameters
 * (HCI_MGMT_VAR_LEN), or is allowed for untrusted sockets
 * (HCI_MGMT_UNTRUSTED).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_security_info,      MGMT_READ_SECURITY_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
};
7156 
/* Announce a new controller index to mgmt listeners.
 *
 * Primary controllers get a legacy Index Added (or Unconfigured Index
 * Added) event; both primary and AMP controllers additionally get an
 * Extended Index Added event carrying a type and bus descriptor.
 * Raw-only devices are never exposed over mgmt.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			/* 0x01 = unconfigured primary controller */
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			/* 0x00 = configured primary controller */
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		/* 0x02 = AMP controller */
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
7188 
/* Announce the removal of a controller index to mgmt listeners.
 *
 * For primary controllers, all pending mgmt commands on the index are
 * first completed with MGMT_STATUS_INVALID_INDEX. Then the legacy Index
 * Removed (or Unconfigured Index Removed) event and the Extended Index
 * Removed event are emitted, mirroring mgmt_index_added().
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Opcode 0 matches every pending command. */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			/* 0x01 = unconfigured primary controller */
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			/* 0x00 = configured primary controller */
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		/* 0x02 = AMP controller */
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
7223 
7224 /* This function requires the caller holds hdev->lock */
7225 static void restart_le_actions(struct hci_dev *hdev)
7226 {
7227 	struct hci_conn_params *p;
7228 
7229 	list_for_each_entry(p, &hdev->le_conn_params, list) {
7230 		/* Needed for AUTO_OFF case where might not "really"
7231 		 * have been powered off.
7232 		 */
7233 		list_del_init(&p->action);
7234 
7235 		switch (p->auto_connect) {
7236 		case HCI_AUTO_CONN_DIRECT:
7237 		case HCI_AUTO_CONN_ALWAYS:
7238 			list_add(&p->action, &hdev->pend_le_conns);
7239 			break;
7240 		case HCI_AUTO_CONN_REPORT:
7241 			list_add(&p->action, &hdev->pend_le_reports);
7242 			break;
7243 		default:
7244 			break;
7245 		}
7246 	}
7247 }
7248 
/* Called when a power-on sequence has finished; err is 0 on success.
 *
 * On success, re-arms the LE auto-connect actions and restarts
 * background scanning. In all cases, completes any pending Set Powered
 * commands and broadcasts the new settings.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_background_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the last matching socket. */
	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
7271 
/* Complete pending mgmt commands and emit the relevant events when the
 * controller goes down. NOTE(review): unlike mgmt_power_on() this does
 * not take hdev->lock itself, which suggests the caller holds it —
 * confirm against the call sites.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Opcode 0 matches every remaining pending command. */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Announce the class of device being cleared, if it was set. */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the last matching socket. */
	if (match.sk)
		sock_put(match.sk);
}
7305 
7306 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7307 {
7308 	struct mgmt_pending_cmd *cmd;
7309 	u8 status;
7310 
7311 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7312 	if (!cmd)
7313 		return;
7314 
7315 	if (err == -ERFKILL)
7316 		status = MGMT_STATUS_RFKILLED;
7317 	else
7318 		status = MGMT_STATUS_FAILED;
7319 
7320 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7321 
7322 	mgmt_pending_remove(cmd);
7323 }
7324 
/* Emit a New Link Key event for a freshly created BR/EDR link key.
 * @persistent becomes the store hint telling userspace whether to keep
 * the key across reboots. The memset zeroes the whole event up front so
 * no uninitialized bytes reach userspace.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
7341 
7342 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
7343 {
7344 	switch (ltk->type) {
7345 	case SMP_LTK:
7346 	case SMP_LTK_SLAVE:
7347 		if (ltk->authenticated)
7348 			return MGMT_LTK_AUTHENTICATED;
7349 		return MGMT_LTK_UNAUTHENTICATED;
7350 	case SMP_LTK_P256:
7351 		if (ltk->authenticated)
7352 			return MGMT_LTK_P256_AUTH;
7353 		return MGMT_LTK_P256_UNAUTH;
7354 	case SMP_LTK_P256_DEBUG:
7355 		return MGMT_LTK_P256_DEBUG;
7356 	}
7357 
7358 	return MGMT_LTK_UNAUTHENTICATED;
7359 }
7360 
/* Emit a New Long Term Key event. The store hint is cleared for keys
 * bound to non-identity random addresses, since those addresses will
 * not be stable across reconnections. The memset zeroes the event so no
 * uninitialized bytes reach userspace.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* Only the master role variant of a legacy LTK sets this flag. */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
7403 
/* Emit a New Identity Resolving Key event, carrying both the RPA the
 * remote is currently using and its identity address plus IRK value.
 * The memset zeroes the event so no uninitialized bytes reach
 * userspace.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
7419 
/* Emit a New Signature Resolving Key event. As with LTKs, the store
 * hint is cleared for keys bound to non-identity random addresses. The
 * memset zeroes the event so no uninitialized bytes reach userspace.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
7449 
/* Emit a New Connection Parameter event so userspace can decide whether
 * to persist parameters suggested by a remote device. Only devices with
 * identity addresses are reported, since parameters keyed on changing
 * random addresses would be useless to store.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
7470 
7471 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
7472 			   u32 flags, u8 *name, u8 name_len)
7473 {
7474 	char buf[512];
7475 	struct mgmt_ev_device_connected *ev = (void *) buf;
7476 	u16 eir_len = 0;
7477 
7478 	bacpy(&ev->addr.bdaddr, &conn->dst);
7479 	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7480 
7481 	ev->flags = __cpu_to_le32(flags);
7482 
7483 	/* We must ensure that the EIR Data fields are ordered and
7484 	 * unique. Keep it simple for now and avoid the problem by not
7485 	 * adding any BR/EDR data to the LE adv.
7486 	 */
7487 	if (conn->le_adv_data_len > 0) {
7488 		memcpy(&ev->eir[eir_len],
7489 		       conn->le_adv_data, conn->le_adv_data_len);
7490 		eir_len = conn->le_adv_data_len;
7491 	} else {
7492 		if (name_len > 0)
7493 			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
7494 						  name, name_len);
7495 
7496 		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
7497 			eir_len = eir_append_data(ev->eir, eir_len,
7498 						  EIR_CLASS_OF_DEV,
7499 						  conn->dev_class, 3);
7500 	}
7501 
7502 	ev->eir_len = cpu_to_le16(eir_len);
7503 
7504 	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
7505 		    sizeof(*ev) + eir_len, NULL);
7506 }
7507 
7508 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
7509 {
7510 	struct sock **sk = data;
7511 
7512 	cmd->cmd_complete(cmd, 0);
7513 
7514 	*sk = cmd->sk;
7515 	sock_hold(*sk);
7516 
7517 	mgmt_pending_remove(cmd);
7518 }
7519 
7520 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
7521 {
7522 	struct hci_dev *hdev = data;
7523 	struct mgmt_cp_unpair_device *cp = cmd->param;
7524 
7525 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
7526 
7527 	cmd->cmd_complete(cmd, 0);
7528 	mgmt_pending_remove(cmd);
7529 }
7530 
7531 bool mgmt_powering_down(struct hci_dev *hdev)
7532 {
7533 	struct mgmt_pending_cmd *cmd;
7534 	struct mgmt_mode *cp;
7535 
7536 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7537 	if (!cmd)
7538 		return false;
7539 
7540 	cp = cmd->param;
7541 	if (!cp->val)
7542 		return true;
7543 
7544 	return false;
7545 }
7546 
/* Emit a Device Disconnected event and resolve any pending Disconnect
 * and Unpair Device commands. No event is emitted when mgmt never
 * considered the device connected, or the link is neither ACL nor LE.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		/* Expedite the pending power off now that the last
		 * connection is gone.
		 */
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp stores a referenced copy of the completing
	 * command's socket in sk, so the event below is skipped for
	 * the socket that issued the disconnect.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	/* Drop the reference taken by disconnect_rsp */
	if (sk)
		sock_put(sk);

	/* Also complete any pending Unpair Device commands */
	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
7582 
7583 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
7584 			    u8 link_type, u8 addr_type, u8 status)
7585 {
7586 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
7587 	struct mgmt_cp_disconnect *cp;
7588 	struct mgmt_pending_cmd *cmd;
7589 
7590 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
7591 			     hdev);
7592 
7593 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
7594 	if (!cmd)
7595 		return;
7596 
7597 	cp = cmd->param;
7598 
7599 	if (bacmp(bdaddr, &cp->addr.bdaddr))
7600 		return;
7601 
7602 	if (cp->addr.type != bdaddr_type)
7603 		return;
7604 
7605 	cmd->cmd_complete(cmd, mgmt_status(status));
7606 	mgmt_pending_remove(cmd);
7607 }
7608 
7609 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7610 			 u8 addr_type, u8 status)
7611 {
7612 	struct mgmt_ev_connect_failed ev;
7613 
7614 	/* The connection is still in hci_conn_hash so test for 1
7615 	 * instead of 0 to know if this is the last one.
7616 	 */
7617 	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7618 		cancel_delayed_work(&hdev->power_off);
7619 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
7620 	}
7621 
7622 	bacpy(&ev.addr.bdaddr, bdaddr);
7623 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7624 	ev.status = mgmt_status(status);
7625 
7626 	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
7627 }
7628 
7629 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
7630 {
7631 	struct mgmt_ev_pin_code_request ev;
7632 
7633 	bacpy(&ev.addr.bdaddr, bdaddr);
7634 	ev.addr.type = BDADDR_BREDR;
7635 	ev.secure = secure;
7636 
7637 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
7638 }
7639 
7640 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7641 				  u8 status)
7642 {
7643 	struct mgmt_pending_cmd *cmd;
7644 
7645 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
7646 	if (!cmd)
7647 		return;
7648 
7649 	cmd->cmd_complete(cmd, mgmt_status(status));
7650 	mgmt_pending_remove(cmd);
7651 }
7652 
7653 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7654 				      u8 status)
7655 {
7656 	struct mgmt_pending_cmd *cmd;
7657 
7658 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
7659 	if (!cmd)
7660 		return;
7661 
7662 	cmd->cmd_complete(cmd, mgmt_status(status));
7663 	mgmt_pending_remove(cmd);
7664 }
7665 
7666 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7667 			      u8 link_type, u8 addr_type, u32 value,
7668 			      u8 confirm_hint)
7669 {
7670 	struct mgmt_ev_user_confirm_request ev;
7671 
7672 	BT_DBG("%s", hdev->name);
7673 
7674 	bacpy(&ev.addr.bdaddr, bdaddr);
7675 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7676 	ev.confirm_hint = confirm_hint;
7677 	ev.value = cpu_to_le32(value);
7678 
7679 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
7680 			  NULL);
7681 }
7682 
7683 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7684 			      u8 link_type, u8 addr_type)
7685 {
7686 	struct mgmt_ev_user_passkey_request ev;
7687 
7688 	BT_DBG("%s", hdev->name);
7689 
7690 	bacpy(&ev.addr.bdaddr, bdaddr);
7691 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7692 
7693 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
7694 			  NULL);
7695 }
7696 
7697 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7698 				      u8 link_type, u8 addr_type, u8 status,
7699 				      u8 opcode)
7700 {
7701 	struct mgmt_pending_cmd *cmd;
7702 
7703 	cmd = pending_find(opcode, hdev);
7704 	if (!cmd)
7705 		return -ENOENT;
7706 
7707 	cmd->cmd_complete(cmd, mgmt_status(status));
7708 	mgmt_pending_remove(cmd);
7709 
7710 	return 0;
7711 }
7712 
7713 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7714 				     u8 link_type, u8 addr_type, u8 status)
7715 {
7716 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7717 					  status, MGMT_OP_USER_CONFIRM_REPLY);
7718 }
7719 
7720 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7721 					 u8 link_type, u8 addr_type, u8 status)
7722 {
7723 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7724 					  status,
7725 					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
7726 }
7727 
7728 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7729 				     u8 link_type, u8 addr_type, u8 status)
7730 {
7731 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7732 					  status, MGMT_OP_USER_PASSKEY_REPLY);
7733 }
7734 
7735 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7736 					 u8 link_type, u8 addr_type, u8 status)
7737 {
7738 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7739 					  status,
7740 					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
7741 }
7742 
7743 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7744 			     u8 link_type, u8 addr_type, u32 passkey,
7745 			     u8 entered)
7746 {
7747 	struct mgmt_ev_passkey_notify ev;
7748 
7749 	BT_DBG("%s", hdev->name);
7750 
7751 	bacpy(&ev.addr.bdaddr, bdaddr);
7752 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7753 	ev.passkey = __cpu_to_le32(passkey);
7754 	ev.entered = entered;
7755 
7756 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
7757 }
7758 
/* Forward an authentication failure to userspace and complete any
 * pending pairing command for this connection.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Skip the event for the socket that initiated the pairing; it
	 * gets the failure via its command response below instead.
	 * Note the event must be sent before mgmt_pending_remove().
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
7779 
/* Handle completion of an HCI authentication enable update: on
 * failure propagate the error to pending Set Link Security commands,
 * on success sync HCI_LINK_SECURITY with the controller's HCI_AUTH
 * state and emit New Settings if anything changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller state into the mgmt flag; changed is
	 * true only when the flag actually flipped.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* NOTE(review): presumably settings_rsp took a reference on the
	 * first matching socket, matching the sock_put here - confirm.
	 */
	if (match.sk)
		sock_put(match.sk);
}
7806 
7807 static void clear_eir(struct hci_request *req)
7808 {
7809 	struct hci_dev *hdev = req->hdev;
7810 	struct hci_cp_write_eir cp;
7811 
7812 	if (!lmp_ext_inq_capable(hdev))
7813 		return;
7814 
7815 	memset(hdev->eir, 0, sizeof(hdev->eir));
7816 
7817 	memset(&cp, 0, sizeof(cp));
7818 
7819 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
7820 }
7821 
/* Handle completion of an SSP (Secure Simple Pairing) enable/disable
 * request: on failure roll back the SSP/HS flags and report the error
 * to pending Set SSP commands; on success sync the flags, emit New
 * Settings when something changed, and bring the EIR data in line
 * with the new SSP state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* NOTE(review): the flag was presumably set before the
		 * HCI command was issued - roll it back here. HS
		 * depends on SSP, so it is cleared along with it.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP also disables HS; changed must reflect
		 * either flag flipping.
		 */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Update the EIR (and SSP debug mode if in use) to match the
	 * new SSP state.
	 */
	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
7874 
7875 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7876 {
7877 	struct cmd_lookup *match = data;
7878 
7879 	if (match->sk == NULL) {
7880 		match->sk = cmd->sk;
7881 		sock_hold(match->sk);
7882 	}
7883 }
7884 
/* Handle completion of a class of device update: locate the socket
 * that triggered it and, on success, broadcast the new class and
 * refresh the extended controller information.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Any of these three commands may have caused the class change;
	 * sk_lookup keeps the first matching socket (with a reference).
	 */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		/* Class of device is always 3 bytes */
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
7903 
/* Handle completion of a local name update. When no Set Local Name
 * command is pending, the change originated on the HCI side: the
 * stored name is updated and, unless this happens while powering on,
 * a Local Name Changed event is still emitted.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* The initiating socket (if any) is excluded from the event */
	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
7931 
7932 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7933 {
7934 	int i;
7935 
7936 	for (i = 0; i < uuid_count; i++) {
7937 		if (!memcmp(uuid, uuids[i], 16))
7938 			return true;
7939 	}
7940 
7941 	return false;
7942 }
7943 
/* Walk the length/type/value structures of an EIR or advertising data
 * blob and report whether any contained service UUID matches an entry
 * in the uuids filter list. 16-bit and 32-bit UUIDs are expanded to
 * full 128-bit form via the Bluetooth base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the data */
		if (field_len == 0)
			break;

		/* Stop on a field claiming to extend past the blob */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) { /* eir[1] is the EIR data type */
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* UUIDs are little-endian on the wire; splice
			 * each 16-bit value into bytes 12-13 of the
			 * base UUID.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 32-bit values occupy bytes 12-15 of the base */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + payload) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
7998 
/* Schedule a delayed restart of LE scanning so that controllers with
 * strict duplicate filtering report fresh results.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart when it would only take effect after the
	 * currently programmed scan window has already ended.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
8013 
/* Apply the service discovery filter (RSSI threshold and UUID list)
 * to a found device. Returns false when the result must be dropped.
 * May trigger an LE scan restart when strict duplicate filtering is
 * in effect.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the advertising
		 * data and the scan response are searched.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
8058 
/* Forward a discovered device to userspace as a Device Found event,
 * applying discovery filters (RSSI threshold, UUID list, limited
 * discoverable) first. The EIR/advertising data, an optional class
 * of device field and the scan response are concatenated into the
 * event's eir buffer.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device only when the data does not
	 * already contain one.
	 */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
8140 
8141 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8142 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
8143 {
8144 	struct mgmt_ev_device_found *ev;
8145 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
8146 	u16 eir_len;
8147 
8148 	ev = (struct mgmt_ev_device_found *) buf;
8149 
8150 	memset(buf, 0, sizeof(buf));
8151 
8152 	bacpy(&ev->addr.bdaddr, bdaddr);
8153 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
8154 	ev->rssi = rssi;
8155 
8156 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
8157 				  name_len);
8158 
8159 	ev->eir_len = cpu_to_le16(eir_len);
8160 
8161 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
8162 }
8163 
8164 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
8165 {
8166 	struct mgmt_ev_discovering ev;
8167 
8168 	BT_DBG("%s discovering %u", hdev->name, discovering);
8169 
8170 	memset(&ev, 0, sizeof(ev));
8171 	ev.type = hdev->discovery.type;
8172 	ev.discovering = discovering;
8173 
8174 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
8175 }
8176 
/* Management interface channel registered with the HCI socket layer;
 * dispatches incoming commands to the mgmt_handlers table.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
8183 
/* Register the management control channel. Counterpart of mgmt_exit(). */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

/* Unregister the management control channel registered by mgmt_init(). */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
8193