xref: /linux/net/bluetooth/mgmt.c (revision e6b5be2be4e30037eb551e0ed09dd97bd00d85d3)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
34 
35 #include "smp.h"
36 
37 #define MGMT_VERSION	1
38 #define MGMT_REVISION	8
39 
/* Commands announced in the MGMT_OP_READ_COMMANDS reply; see
 * read_commands() below.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
};
98 
/* Events announced in the MGMT_OP_READ_COMMANDS reply; see
 * read_commands() below.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
};
130 
131 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
132 
/* Book-keeping for a management command that is awaiting completion */
struct pending_cmd {
	struct list_head list;	/* entry in hdev->mgmt_pending */
	u16 opcode;		/* MGMT_OP_* that is pending */
	int index;		/* controller index — presumably hdev->id; confirm at allocation site */
	void *param;		/* command parameters (e.g. struct mgmt_mode) */
	size_t param_len;	/* length of @param in bytes */
	struct sock *sk;	/* socket that issued the command */
	void *user_data;	/* opaque per-command context, matched in mgmt_pending_find_data() */
	/* invoked when the command finishes with the mgmt status code */
	void (*cmd_complete)(struct pending_cmd *cmd, u8 status);
};
143 
/* HCI to MGMT error code conversion table, indexed directly by the
 * HCI status code; used by mgmt_status() below.
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
208 
209 static u8 mgmt_status(u8 hci_status)
210 {
211 	if (hci_status < ARRAY_SIZE(mgmt_status_table))
212 		return mgmt_status_table[hci_status];
213 
214 	return MGMT_STATUS_FAILED;
215 }
216 
217 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
218 		      struct sock *skip_sk)
219 {
220 	struct sk_buff *skb;
221 	struct mgmt_hdr *hdr;
222 
223 	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
224 	if (!skb)
225 		return -ENOMEM;
226 
227 	hdr = (void *) skb_put(skb, sizeof(*hdr));
228 	hdr->opcode = cpu_to_le16(event);
229 	if (hdev)
230 		hdr->index = cpu_to_le16(hdev->id);
231 	else
232 		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
233 	hdr->len = cpu_to_le16(data_len);
234 
235 	if (data)
236 		memcpy(skb_put(skb, data_len), data, data_len);
237 
238 	/* Time stamp */
239 	__net_timestamp(skb);
240 
241 	hci_send_to_control(skb, skip_sk);
242 	kfree_skb(skb);
243 
244 	return 0;
245 }
246 
247 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
248 {
249 	struct sk_buff *skb;
250 	struct mgmt_hdr *hdr;
251 	struct mgmt_ev_cmd_status *ev;
252 	int err;
253 
254 	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
255 
256 	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
257 	if (!skb)
258 		return -ENOMEM;
259 
260 	hdr = (void *) skb_put(skb, sizeof(*hdr));
261 
262 	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
263 	hdr->index = cpu_to_le16(index);
264 	hdr->len = cpu_to_le16(sizeof(*ev));
265 
266 	ev = (void *) skb_put(skb, sizeof(*ev));
267 	ev->status = status;
268 	ev->opcode = cpu_to_le16(cmd);
269 
270 	err = sock_queue_rcv_skb(sk, skb);
271 	if (err < 0)
272 		kfree_skb(skb);
273 
274 	return err;
275 }
276 
277 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
278 			void *rp, size_t rp_len)
279 {
280 	struct sk_buff *skb;
281 	struct mgmt_hdr *hdr;
282 	struct mgmt_ev_cmd_complete *ev;
283 	int err;
284 
285 	BT_DBG("sock %p", sk);
286 
287 	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
288 	if (!skb)
289 		return -ENOMEM;
290 
291 	hdr = (void *) skb_put(skb, sizeof(*hdr));
292 
293 	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
294 	hdr->index = cpu_to_le16(index);
295 	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
296 
297 	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
298 	ev->opcode = cpu_to_le16(cmd);
299 	ev->status = status;
300 
301 	if (rp)
302 		memcpy(ev->data, rp, rp_len);
303 
304 	err = sock_queue_rcv_skb(sk, skb);
305 	if (err < 0)
306 		kfree_skb(skb);
307 
308 	return err;
309 }
310 
311 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
312 			u16 data_len)
313 {
314 	struct mgmt_rp_read_version rp;
315 
316 	BT_DBG("sock %p", sk);
317 
318 	rp.version = MGMT_VERSION;
319 	rp.revision = cpu_to_le16(MGMT_REVISION);
320 
321 	return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
322 			    sizeof(rp));
323 }
324 
325 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
326 			 u16 data_len)
327 {
328 	struct mgmt_rp_read_commands *rp;
329 	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
330 	const u16 num_events = ARRAY_SIZE(mgmt_events);
331 	__le16 *opcode;
332 	size_t rp_size;
333 	int i, err;
334 
335 	BT_DBG("sock %p", sk);
336 
337 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
338 
339 	rp = kmalloc(rp_size, GFP_KERNEL);
340 	if (!rp)
341 		return -ENOMEM;
342 
343 	rp->num_commands = cpu_to_le16(num_commands);
344 	rp->num_events = cpu_to_le16(num_events);
345 
346 	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
347 		put_unaligned_le16(mgmt_commands[i], opcode);
348 
349 	for (i = 0; i < num_events; i++, opcode++)
350 		put_unaligned_le16(mgmt_events[i], opcode);
351 
352 	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
353 			   rp_size);
354 	kfree(rp);
355 
356 	return err;
357 }
358 
/* Handle MGMT_OP_READ_INDEX_LIST: report the indexes of all
 * configured BR/EDR controllers.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: an upper bound on the number of entries, only
	 * used to size the reply buffer. The second pass applies
	 * further filters, so it can only produce fewer entries.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC since hci_dev_list_lock is held */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the actual indexes */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the final count */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
418 
/* Handle MGMT_OP_READ_UNCONF_INDEX_LIST: report the indexes of all
 * unconfigured BR/EDR controllers. Mirrors read_index_list() with
 * the HCI_UNCONFIGURED test inverted.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for sizing the reply buffer */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC since hci_dev_list_lock is held */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in indexes, applying additional filters */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the final count */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
			   0, rp, rp_len);

	kfree(rp);

	return err;
}
478 
479 static bool is_configured(struct hci_dev *hdev)
480 {
481 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
482 	    !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
483 		return false;
484 
485 	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
486 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
487 		return false;
488 
489 	return true;
490 }
491 
492 static __le32 get_missing_options(struct hci_dev *hdev)
493 {
494 	u32 options = 0;
495 
496 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
497 	    !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
498 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
499 
500 	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
501 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
502 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
503 
504 	return cpu_to_le32(options);
505 }
506 
507 static int new_options(struct hci_dev *hdev, struct sock *skip)
508 {
509 	__le32 options = get_missing_options(hdev);
510 
511 	return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
512 			  sizeof(options), skip);
513 }
514 
515 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
516 {
517 	__le32 options = get_missing_options(hdev);
518 
519 	return cmd_complete(sk, hdev->id, opcode, 0, &options,
520 			    sizeof(options));
521 }
522 
523 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
524 			    void *data, u16 data_len)
525 {
526 	struct mgmt_rp_read_config_info rp;
527 	u32 options = 0;
528 
529 	BT_DBG("sock %p %s", sk, hdev->name);
530 
531 	hci_dev_lock(hdev);
532 
533 	memset(&rp, 0, sizeof(rp));
534 	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
535 
536 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
537 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
538 
539 	if (hdev->set_bdaddr)
540 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
541 
542 	rp.supported_options = cpu_to_le32(options);
543 	rp.missing_options = get_missing_options(hdev);
544 
545 	hci_dev_unlock(hdev);
546 
547 	return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
548 			    sizeof(rp));
549 }
550 
551 static u32 get_supported_settings(struct hci_dev *hdev)
552 {
553 	u32 settings = 0;
554 
555 	settings |= MGMT_SETTING_POWERED;
556 	settings |= MGMT_SETTING_BONDABLE;
557 	settings |= MGMT_SETTING_DEBUG_KEYS;
558 	settings |= MGMT_SETTING_CONNECTABLE;
559 	settings |= MGMT_SETTING_DISCOVERABLE;
560 
561 	if (lmp_bredr_capable(hdev)) {
562 		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
563 			settings |= MGMT_SETTING_FAST_CONNECTABLE;
564 		settings |= MGMT_SETTING_BREDR;
565 		settings |= MGMT_SETTING_LINK_SECURITY;
566 
567 		if (lmp_ssp_capable(hdev)) {
568 			settings |= MGMT_SETTING_SSP;
569 			settings |= MGMT_SETTING_HS;
570 		}
571 
572 		if (lmp_sc_capable(hdev) ||
573 		    test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
574 			settings |= MGMT_SETTING_SECURE_CONN;
575 	}
576 
577 	if (lmp_le_capable(hdev)) {
578 		settings |= MGMT_SETTING_LE;
579 		settings |= MGMT_SETTING_ADVERTISING;
580 		settings |= MGMT_SETTING_SECURE_CONN;
581 		settings |= MGMT_SETTING_PRIVACY;
582 	}
583 
584 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
585 	    hdev->set_bdaddr)
586 		settings |= MGMT_SETTING_CONFIGURATION;
587 
588 	return settings;
589 }
590 
591 static u32 get_current_settings(struct hci_dev *hdev)
592 {
593 	u32 settings = 0;
594 
595 	if (hdev_is_powered(hdev))
596 		settings |= MGMT_SETTING_POWERED;
597 
598 	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
599 		settings |= MGMT_SETTING_CONNECTABLE;
600 
601 	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
602 		settings |= MGMT_SETTING_FAST_CONNECTABLE;
603 
604 	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
605 		settings |= MGMT_SETTING_DISCOVERABLE;
606 
607 	if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
608 		settings |= MGMT_SETTING_BONDABLE;
609 
610 	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
611 		settings |= MGMT_SETTING_BREDR;
612 
613 	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
614 		settings |= MGMT_SETTING_LE;
615 
616 	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
617 		settings |= MGMT_SETTING_LINK_SECURITY;
618 
619 	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
620 		settings |= MGMT_SETTING_SSP;
621 
622 	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
623 		settings |= MGMT_SETTING_HS;
624 
625 	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
626 		settings |= MGMT_SETTING_ADVERTISING;
627 
628 	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
629 		settings |= MGMT_SETTING_SECURE_CONN;
630 
631 	if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
632 		settings |= MGMT_SETTING_DEBUG_KEYS;
633 
634 	if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
635 		settings |= MGMT_SETTING_PRIVACY;
636 
637 	return settings;
638 }
639 
640 #define PNP_INFO_SVCLASS_ID		0x1200
641 
/* Append an AD/EIR structure listing the registered 16-bit service
 * UUIDs to @data, staying within @len bytes. Returns the new write
 * position. If not everything fits the type is downgraded from
 * EIR_UUID16_ALL to EIR_UUID16_SOME.
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte header plus at least one UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* The 16-bit form sits at bytes 12-13 of the stored
		 * 128-bit value.
		 */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		/* Values below 0x1100 are skipped */
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			/* Lazily emit the header; the AD length starts
			 * at 1 to account for the type byte.
			 */
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
683 
/* Append an AD/EIR structure listing the registered 32-bit service
 * UUIDs to @data, staying within @len bytes. Returns the new write
 * position; downgrades to EIR_UUID32_SOME when space runs out.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte header plus at least one UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			/* Lazily emit the header; AD length starts at 1
			 * for the type byte.
			 */
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* The 32-bit form sits at bytes 12-15 of the stored
		 * 128-bit value.
		 */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
716 
/* Append an AD/EIR structure listing the registered 128-bit service
 * UUIDs to @data, staying within @len bytes. Returns the new write
 * position; downgrades to EIR_UUID128_SOME when space runs out.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte header plus one 16-byte UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			/* Lazily emit the header; AD length starts at 1
			 * for the type byte.
			 */
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
749 
750 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
751 {
752 	struct pending_cmd *cmd;
753 
754 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
755 		if (cmd->opcode == opcode)
756 			return cmd;
757 	}
758 
759 	return NULL;
760 }
761 
762 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
763 						  struct hci_dev *hdev,
764 						  const void *data)
765 {
766 	struct pending_cmd *cmd;
767 
768 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
769 		if (cmd->user_data != data)
770 			continue;
771 		if (cmd->opcode == opcode)
772 			return cmd;
773 	}
774 
775 	return NULL;
776 }
777 
/* Build the scan response payload (currently just the local name)
 * into @ptr and return the number of bytes written.
 */
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Two bytes are reserved for the AD length/type header */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		/* Truncate and mark the name as shortened if needed */
		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* AD length covers the type byte plus the name itself */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
803 
/* Queue an LE Set Scan Response Data command if LE is enabled and the
 * data actually changed; also caches the new data in hdev.
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Skip the command when nothing changed */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	/* Cache the full (zero-padded) buffer and its length */
	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
828 
829 static u8 get_adv_discov_flags(struct hci_dev *hdev)
830 {
831 	struct pending_cmd *cmd;
832 
833 	/* If there's a pending mgmt command the flags will not yet have
834 	 * their final values, so check for this first.
835 	 */
836 	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
837 	if (cmd) {
838 		struct mgmt_mode *cp = cmd->param;
839 		if (cp->val == 0x01)
840 			return LE_AD_GENERAL;
841 		else if (cp->val == 0x02)
842 			return LE_AD_LIMITED;
843 	} else {
844 		if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
845 			return LE_AD_LIMITED;
846 		else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
847 			return LE_AD_GENERAL;
848 	}
849 
850 	return 0;
851 }
852 
853 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
854 {
855 	u8 ad_len = 0, flags = 0;
856 
857 	flags |= get_adv_discov_flags(hdev);
858 
859 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
860 		flags |= LE_AD_NO_BREDR;
861 
862 	if (flags) {
863 		BT_DBG("adv flags 0x%02x", flags);
864 
865 		ptr[0] = 2;
866 		ptr[1] = EIR_FLAGS;
867 		ptr[2] = flags;
868 
869 		ad_len += 3;
870 		ptr += 3;
871 	}
872 
873 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
874 		ptr[0] = 2;
875 		ptr[1] = EIR_TX_POWER;
876 		ptr[2] = (u8) hdev->adv_tx_power;
877 
878 		ad_len += 3;
879 		ptr += 3;
880 	}
881 
882 	return ad_len;
883 }
884 
/* Queue an LE Set Advertising Data command if LE is enabled and the
 * data actually changed; also caches the new data in hdev.
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Skip the command when nothing changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Cache the full (zero-padded) buffer and its length */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
909 
910 int mgmt_update_adv_data(struct hci_dev *hdev)
911 {
912 	struct hci_request req;
913 
914 	hci_req_init(&req, hdev);
915 	update_adv_data(&req);
916 
917 	return hci_req_run(&req, NULL);
918 }
919 
/* Build the extended inquiry response payload into @data: local name,
 * inquiry TX power, device ID and the service UUID lists.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type: shortened when truncated to 48 bytes */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length (type byte plus name) */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	/* TX power structure, if the controller reported a valid value */
	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	/* Device ID structure: source, vendor, product, version */
	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* Fill the remaining space with the service UUID lists */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
967 
/* Queue a Write Extended Inquiry Response command if the controller
 * state allows it and the EIR data actually changed.
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	/* Defer while the service cache is active; see service_cache_off() */
	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Skip the command when nothing changed */
	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
996 
997 static u8 get_service_classes(struct hci_dev *hdev)
998 {
999 	struct bt_uuid *uuid;
1000 	u8 val = 0;
1001 
1002 	list_for_each_entry(uuid, &hdev->uuids, list)
1003 		val |= uuid->svc_hint;
1004 
1005 	return val;
1006 }
1007 
/* Queue a Write Class of Device command built from the major/minor
 * class and the registered service classes, if it changed and the
 * controller state allows updating it.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* Defer while the service cache is active; see service_cache_off() */
	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* Limited discoverable mode sets bit 0x20 in the major class */
	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	/* Skip the command when nothing changed */
	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
1036 
1037 static bool get_connectable(struct hci_dev *hdev)
1038 {
1039 	struct pending_cmd *cmd;
1040 
1041 	/* If there's a pending mgmt command the flag will not yet have
1042 	 * it's final value, so check for this first.
1043 	 */
1044 	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1045 	if (cmd) {
1046 		struct mgmt_mode *cp = cmd->param;
1047 		return cp->val;
1048 	}
1049 
1050 	return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1051 }
1052 
1053 static void disable_advertising(struct hci_request *req)
1054 {
1055 	u8 enable = 0x00;
1056 
1057 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1058 }
1059 
/* Queue the HCI commands that (re)enable LE advertising with
 * parameters derived from the current mgmt state. Does nothing while
 * an LE connection exists or if no usable own address is available.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* Turn advertising off first before changing its parameters */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
1100 
/* Delayed work: when the service cache period ends, push the real EIR
 * data and class of device to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Only act when the cache flag was still set */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
1121 
/* Delayed work: the Resolvable Private Address lifetime has run out.
 * Mark it expired and, if we are advertising, re-program advertising
 * so that a fresh RPA gets generated and written.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	/* Nothing more to do unless advertising is enabled */
	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */
	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, NULL);
}
1142 
/* One-time per-controller initialization when the first mgmt command
 * is received for @hdev. Guarded by the HCI_MGMT flag so repeated
 * commands are no-ops.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_BONDABLE, &hdev->dev_flags);
}
1158 
/* Handle the Read Controller Information mgmt command: fill a
 * mgmt_rp_read_info reply from @hdev state (address, HCI version,
 * manufacturer, settings, class and names) and send it back.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
1188 
/* Release a pending command: drop the socket reference taken in
 * mgmt_pending_add() and free the copied parameters and the command
 * itself. Does not unlink from hdev->mgmt_pending (see
 * mgmt_pending_remove() for that).
 */
static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}
1195 
1196 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1197 					    struct hci_dev *hdev, void *data,
1198 					    u16 len)
1199 {
1200 	struct pending_cmd *cmd;
1201 
1202 	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1203 	if (!cmd)
1204 		return NULL;
1205 
1206 	cmd->opcode = opcode;
1207 	cmd->index = hdev->id;
1208 
1209 	cmd->param = kmemdup(data, len, GFP_KERNEL);
1210 	if (!cmd->param) {
1211 		kfree(cmd);
1212 		return NULL;
1213 	}
1214 
1215 	cmd->param_len = len;
1216 
1217 	cmd->sk = sk;
1218 	sock_hold(sk);
1219 
1220 	list_add(&cmd->list, &hdev->mgmt_pending);
1221 
1222 	return cmd;
1223 }
1224 
1225 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1226 				 void (*cb)(struct pending_cmd *cmd,
1227 					    void *data),
1228 				 void *data)
1229 {
1230 	struct pending_cmd *cmd, *tmp;
1231 
1232 	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1233 		if (opcode > 0 && cmd->opcode != opcode)
1234 			continue;
1235 
1236 		cb(cmd, data);
1237 	}
1238 }
1239 
/* Unlink a pending command from its hdev list and free it. */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1245 
/* Send a Command Complete for @opcode carrying the controller's
 * current settings bitmask as the response payload.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
			    sizeof(settings));
}
1253 
/* Completion callback for the clean_up_hci_state() request: once no
 * connections remain, skip the power-off delay and run the power_off
 * work immediately.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1263 
/* Queue the HCI commands needed to stop any ongoing discovery or
 * passive LE scanning on @req, based on the current discovery state.
 *
 * Returns true if at least one command was queued (i.e. the request
 * needs to run before discovery is really stopped), false otherwise.
 */
static bool hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		/* Either a BR/EDR inquiry or an LE scan is running */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		return true;

	case DISCOVERY_RESOLVING:
		/* Cancel the outstanding remote name request, if any */
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			break;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		return true;

	default:
		/* Passive scanning */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			hci_req_add_le_scan_disable(req);
			return true;
		}

		break;
	}

	return false;
}
1305 
/* Build and run one HCI request that tears down controller activity
 * before powering off: disable page/inquiry scan, stop advertising,
 * stop discovery, and disconnect or reject every known connection.
 *
 * Returns the hci_req_run() result; -ENODATA means nothing had to be
 * queued (callers treat that as "already clean").
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(&req);

	discov_stopped = hci_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		/* Pick the teardown command matching the connection's
		 * lifecycle stage.
		 */
		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1364 
1365 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1366 		       u16 len)
1367 {
1368 	struct mgmt_mode *cp = data;
1369 	struct pending_cmd *cmd;
1370 	int err;
1371 
1372 	BT_DBG("request for %s", hdev->name);
1373 
1374 	if (cp->val != 0x00 && cp->val != 0x01)
1375 		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1376 				  MGMT_STATUS_INVALID_PARAMS);
1377 
1378 	hci_dev_lock(hdev);
1379 
1380 	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1381 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1382 				 MGMT_STATUS_BUSY);
1383 		goto failed;
1384 	}
1385 
1386 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1387 		cancel_delayed_work(&hdev->power_off);
1388 
1389 		if (cp->val) {
1390 			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1391 					 data, len);
1392 			err = mgmt_powered(hdev, 1);
1393 			goto failed;
1394 		}
1395 	}
1396 
1397 	if (!!cp->val == hdev_is_powered(hdev)) {
1398 		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1399 		goto failed;
1400 	}
1401 
1402 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1403 	if (!cmd) {
1404 		err = -ENOMEM;
1405 		goto failed;
1406 	}
1407 
1408 	if (cp->val) {
1409 		queue_work(hdev->req_workqueue, &hdev->power_on);
1410 		err = 0;
1411 	} else {
1412 		/* Disconnect connections, stop scans, etc */
1413 		err = clean_up_hci_state(hdev);
1414 		if (!err)
1415 			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1416 					   HCI_POWER_OFF_TIMEOUT);
1417 
1418 		/* ENODATA means there were no HCI commands queued */
1419 		if (err == -ENODATA) {
1420 			cancel_delayed_work(&hdev->power_off);
1421 			queue_work(hdev->req_workqueue, &hdev->power_off.work);
1422 			err = 0;
1423 		}
1424 	}
1425 
1426 failed:
1427 	hci_dev_unlock(hdev);
1428 	return err;
1429 }
1430 
1431 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1432 {
1433 	__le32 ev;
1434 
1435 	ev = cpu_to_le32(get_current_settings(hdev));
1436 
1437 	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1438 }
1439 
/* Public wrapper: announce a settings change to all mgmt sockets. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1444 
/* Context passed through mgmt_pending_foreach() callbacks such as
 * settings_rsp(): records the first responded socket (with a held
 * reference) so it can later be skipped when broadcasting the
 * resulting New Settings event, plus an optional mgmt status.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1450 
/* Per-command callback: answer a pending command with the current
 * settings, remember the first socket in the cmd_lookup match (with
 * a reference held) and free the command.
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1466 
/* Per-command callback: fail a pending command with the mgmt status
 * pointed to by @data and remove it.
 */
static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1474 
1475 static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
1476 {
1477 	if (cmd->cmd_complete) {
1478 		u8 *status = data;
1479 
1480 		cmd->cmd_complete(cmd, *status);
1481 		mgmt_pending_remove(cmd);
1482 
1483 		return;
1484 	}
1485 
1486 	cmd_status_rsp(cmd, data);
1487 }
1488 
/* cmd_complete handler that echoes the command's stored parameters
 * back as the response payload.
 */
static void generic_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
		     cmd->param_len);
}
1494 
/* cmd_complete handler that replies with only the leading
 * mgmt_addr_info portion of the stored parameters.
 */
static void addr_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
		     sizeof(struct mgmt_addr_info));
}
1500 
1501 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1502 {
1503 	if (!lmp_bredr_capable(hdev))
1504 		return MGMT_STATUS_NOT_SUPPORTED;
1505 	else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1506 		return MGMT_STATUS_REJECTED;
1507 	else
1508 		return MGMT_STATUS_SUCCESS;
1509 }
1510 
1511 static u8 mgmt_le_support(struct hci_dev *hdev)
1512 {
1513 	if (!lmp_le_capable(hdev))
1514 		return MGMT_STATUS_NOT_SUPPORTED;
1515 	else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1516 		return MGMT_STATUS_REJECTED;
1517 	else
1518 		return MGMT_STATUS_SUCCESS;
1519 }
1520 
/* HCI request completion for Set Discoverable: update the
 * HCI_DISCOVERABLE flag, arm the discoverable timeout if one was
 * requested, respond to the pending command and broadcast New
 * Settings when the state actually changed.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Undo the limited bit set optimistically by the caller */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set. Also update page scan based on whitelist
	 * entries.
	 */
	hci_req_init(&req, hdev);
	hci_update_page_scan(hdev, &req);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1579 
/* Handle the Set Discoverable mgmt command.
 *
 * cp->val: 0x00 = off, 0x01 = general discoverable, 0x02 = limited
 * discoverable (which requires a timeout). When powered off only the
 * setting flag is toggled; when powered the needed IAC/scan-enable
 * (BR/EDR) and advertising-data (LE) commands are queued and the
 * reply is sent from set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1744 
/* Queue page scan activity/type commands that switch fast
 * connectable mode on (interlaced scan, 160 ms interval) or off
 * (standard scan, 1.28 s interval). Commands are only queued when
 * the current parameters differ, and only for BR/EDR-enabled
 * controllers of version 1.2 or later.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* Page scan type/activity commands require HCI 1.2+ */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	/* Skip the commands when the controller already matches */
	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
1779 
/* HCI request completion for Set Connectable: update the
 * HCI_CONNECTABLE (and, on disable, HCI_DISCOVERABLE) flags, respond
 * to the pending command and propagate the change to page scan,
 * advertising data and background scanning when something changed.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool conn_changed, discov_changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
		discov_changed = false;
	} else {
		/* Turning connectable off also drops discoverable */
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (conn_changed || discov_changed) {
		new_settings(hdev, cmd->sk);
		hci_update_page_scan(hdev, NULL);
		if (discov_changed)
			mgmt_update_adv_data(hdev);
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1828 
/* Flag-only path for Set Connectable, used when no HCI commands are
 * needed (powered off, or the request produced an empty HCI
 * request). Updates the flags, replies with the settings and
 * broadcasts New Settings on change.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		changed = true;

	if (val) {
		set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	} else {
		/* Non-connectable also means non-discoverable */
		clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_page_scan(hdev, NULL);
		hci_update_background_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1857 
/* Handle the Set Connectable mgmt command.
 *
 * When powered off only the setting flags are updated via
 * set_connectable_update_settings(). When powered, queues the scan
 * enable / advertising data commands as needed and completes
 * asynchronously in set_connectable_complete(); an empty request
 * (-ENODATA) falls back to the flag-only path.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			/* If we don't have any whitelist entries just
			 * disable all scanning. If there are entries
			 * and we had both page and inquiry scanning
			 * enabled then fall back to only page scanning.
			 * Otherwise no changes are needed.
			 */
			if (list_empty(&hdev->whitelist))
				scan = SCAN_DISABLED;
			else if (test_bit(HCI_ISCAN, &hdev->flags))
				scan = SCAN_PAGE;
			else
				goto no_scan_update;

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

no_scan_update:
	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Update the advertising parameters if necessary */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* Empty request: fall back to updating flags only */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1962 
1963 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1964 			u16 len)
1965 {
1966 	struct mgmt_mode *cp = data;
1967 	bool changed;
1968 	int err;
1969 
1970 	BT_DBG("request for %s", hdev->name);
1971 
1972 	if (cp->val != 0x00 && cp->val != 0x01)
1973 		return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1974 				  MGMT_STATUS_INVALID_PARAMS);
1975 
1976 	hci_dev_lock(hdev);
1977 
1978 	if (cp->val)
1979 		changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
1980 	else
1981 		changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1982 
1983 	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1984 	if (err < 0)
1985 		goto unlock;
1986 
1987 	if (changed)
1988 		err = new_settings(hdev, sk);
1989 
1990 unlock:
1991 	hci_dev_unlock(hdev);
1992 	return err;
1993 }
1994 
/* Handle the Set Link Security mgmt command.
 *
 * When powered off only the HCI_LINK_SECURITY flag is toggled. When
 * powered, issues HCI Write Authentication Enable and completes via
 * the pending command infrastructure.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2064 
/* Handle the Set Secure Simple Pairing mgmt command.
 *
 * When powered off only the HCI_SSP_ENABLED flag (and, on disable,
 * HCI_HS_ENABLED) is toggled. When powered, sends HCI Write Simple
 * Pairing Mode and completes via the pending command infrastructure.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			/* Disabling SSP also disables High Speed */
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Turning SSP off also turns off debug key mode, if active */
	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2146 
/* Handle the Set High Speed mgmt command: toggle HCI_HS_ENABLED.
 * Requires SSP to be enabled; disabling while powered is rejected.
 * Host-side flag only, no HCI commands are sent.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling HS is only allowed while powered off */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2197 
/* HCI request completion for Set LE: answer all pending Set LE
 * commands (with an error status on failure), broadcast New
 * Settings, and - when LE ended up enabled - refresh the advertising
 * and scan response data and the background scan.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	/* settings_rsp() recorded the first responded socket so it
	 * can be skipped here.
	 */
	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_update_background_scan(hdev);

		hci_dev_unlock(hdev);
	}
}
2237 
/* Set Low Energy command handler (MGMT_OP_SET_LE).
 *
 * Toggles host LE support. When the adapter is powered and the
 * controller state differs, the change is carried out via a Write LE
 * Host Supported HCI request and answered from le_enable_complete();
 * otherwise only the stored flags are updated and the response is
 * sent immediately.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* No HCI traffic needed when powered off or when the controller
	 * already matches the requested state; just sync the flags.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Disabling LE implicitly disables advertising */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one LE/advertising state change may be in flight */
	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop active advertising before turning LE support off */
		if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2326 
2327 /* This is a helper function to test for pending mgmt commands that can
2328  * cause CoD or EIR HCI commands. We can only allow one such pending
2329  * mgmt command at a time since otherwise we cannot easily track what
2330  * the current values are, will be, and based on that calculate if a new
2331  * HCI command needs to be sent and if yes with what value.
2332  */
2333 static bool pending_eir_or_class(struct hci_dev *hdev)
2334 {
2335 	struct pending_cmd *cmd;
2336 
2337 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2338 		switch (cmd->opcode) {
2339 		case MGMT_OP_ADD_UUID:
2340 		case MGMT_OP_REMOVE_UUID:
2341 		case MGMT_OP_SET_DEV_CLASS:
2342 		case MGMT_OP_SET_POWERED:
2343 			return true;
2344 		}
2345 	}
2346 
2347 	return false;
2348 }
2349 
/* Bluetooth Base UUID (xxxxxxxx-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order; 16-bit and 32-bit UUIDs differ from it only
 * in the last four bytes (bytes 12-15 here).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2354 
2355 static u8 get_uuid_size(const u8 *uuid)
2356 {
2357 	u32 val;
2358 
2359 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2360 		return 128;
2361 
2362 	val = get_unaligned_le32(&uuid[12]);
2363 	if (val > 0xffff)
2364 		return 32;
2365 
2366 	return 16;
2367 }
2368 
/* Common completion helper for the UUID/class related commands: answer
 * the pending command (if still present) with the current class of
 * device and remove it from the pending list.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	/* Responses to these commands carry the 3-byte class of device */
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2387 
/* HCI request callback for add_uuid(); forwards the result to the
 * common class/EIR completion handler.
 */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2394 
/* Add UUID command handler (MGMT_OP_ADD_UUID).
 *
 * Appends the UUID to the adapter's list and schedules class of
 * device and EIR updates. If the HCI request queues no commands
 * (-ENODATA, e.g. when powered off) the command is completed
 * immediately with the current class; otherwise completion happens
 * from add_uuid_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one EIR/class changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	/* Ownership of uuid passes to the hdev->uuids list here */
	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		/* -ENODATA means no HCI commands were actually needed */
		if (err != -ENODATA)
			goto failed;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2452 
2453 static bool enable_service_cache(struct hci_dev *hdev)
2454 {
2455 	if (!hdev_is_powered(hdev))
2456 		return false;
2457 
2458 	if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2459 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2460 				   CACHE_TIMEOUT);
2461 		return true;
2462 	}
2463 
2464 	return false;
2465 }
2466 
/* HCI request callback for remove_uuid(); forwards the result to the
 * common class/EIR completion handler.
 */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2473 
/* Remove UUID command handler (MGMT_OP_REMOVE_UUID).
 *
 * An all-zero UUID clears the whole list; otherwise every entry
 * matching the given UUID is removed. Class of device and EIR data
 * are then updated, completing either immediately (-ENODATA) or from
 * remove_uuid_complete().
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one EIR/class changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The wildcard (all zeroes) UUID clears every entry */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* When the service cache timer was newly armed, the
		 * class/EIR update is deferred until it fires.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		/* -ENODATA means no HCI commands were actually needed */
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2551 
/* HCI request callback for set_dev_class(); forwards the result to the
 * common class/EIR completion handler.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2558 
/* Set Device Class command handler (MGMT_OP_SET_DEV_CLASS).
 *
 * Validates and stores the major/minor class and, when powered, pushes
 * the new class of device (and possibly EIR data) to the controller.
 * Completion happens either immediately (powered off or -ENODATA) or
 * from set_class_complete().
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one EIR/class changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The two lowest minor bits and the three highest major bits are
	 * reserved and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		/* NOTE(review): the lock is dropped around the synchronous
		 * cancel, presumably because the service cache work itself
		 * takes hci_dev_lock — confirm before relying on this.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* -ENODATA means no HCI commands were actually needed */
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2629 
/* Load Link Keys command handler (MGMT_OP_LOAD_LINK_KEYS).
 *
 * Replaces the adapter's entire BR/EDR link key store with the list
 * supplied by user space and updates the keep-debug-keys policy.
 * All parameters are validated before any state is touched.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps expected_len below from overflowing u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared key count exactly */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry up front so the store is never left
	 * half-replaced on bad input.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2711 
2712 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2713 			   u8 addr_type, struct sock *skip_sk)
2714 {
2715 	struct mgmt_ev_device_unpaired ev;
2716 
2717 	bacpy(&ev.addr.bdaddr, bdaddr);
2718 	ev.addr.type = addr_type;
2719 
2720 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2721 			  skip_sk);
2722 }
2723 
/* Unpair Device command handler (MGMT_OP_UNPAIR_DEVICE).
 *
 * Removes the stored keys (link key for BR/EDR; IRK and LTK for LE)
 * for the given address and optionally terminates an existing
 * connection. When a disconnect is issued the command completes later
 * through the pending command; otherwise it completes here.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
					       &cp->addr.bdaddr);
		if (conn) {
			/* Defer clearing up the connection parameters
			 * until closing to give a chance of keeping
			 * them if a repairing happens.
			 */
			set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

			/* If disconnection is not requested, then
			 * clear the connection variable so that the
			 * link is not terminated.
			 */
			if (!cp->disconnect)
				conn = NULL;
		}

		/* Map the mgmt address type to the HCI one */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	/* No key was found for the address, so nothing was unpaired */
	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2836 
/* Disconnect command handler (MGMT_OP_DISCONNECT).
 *
 * Terminates an existing BR/EDR or LE connection to the given address.
 * The command completes asynchronously through the pending command
 * once the disconnection finishes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one Disconnect command may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2899 
2900 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2901 {
2902 	switch (link_type) {
2903 	case LE_LINK:
2904 		switch (addr_type) {
2905 		case ADDR_LE_DEV_PUBLIC:
2906 			return BDADDR_LE_PUBLIC;
2907 
2908 		default:
2909 			/* Fallback to LE Random address type */
2910 			return BDADDR_LE_RANDOM;
2911 		}
2912 
2913 	default:
2914 		/* Fallback to BR/EDR type */
2915 		return BDADDR_BREDR;
2916 	}
2917 }
2918 
/* Get Connections command handler (MGMT_OP_GET_CONNECTIONS).
 *
 * Returns the addresses of all mgmt-visible connections. SCO and eSCO
 * links are filtered out of the reply.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	size_t rp_len;
	int err;
	u16 i;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count the connections to size the reply buffer */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
	rp = kmalloc(rp_len, GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in the entries. SCO/eSCO links are skipped
	 * (the slot is written first but reused, since i only advances
	 * for accepted link types).
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
			   rp_len);

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2976 
/* Queue a pending PIN Code Negative Reply command and send the
 * corresponding HCI command. Called with hdev locked.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	/* The HCI command only carries the bdaddr, not the full address */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2995 
/* PIN Code Reply command handler (MGMT_OP_PIN_CODE_REPLY).
 *
 * Forwards the user supplied PIN code to the controller. If high
 * security is required the PIN must be a full 16 bytes; otherwise a
 * negative reply is sent on the user's behalf.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a 16 digit PIN; reject shorter ones */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3057 
3058 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3059 			     u16 len)
3060 {
3061 	struct mgmt_cp_set_io_capability *cp = data;
3062 
3063 	BT_DBG("");
3064 
3065 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3066 		return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3067 				    MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3068 
3069 	hci_dev_lock(hdev);
3070 
3071 	hdev->io_capability = cp->io_capability;
3072 
3073 	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3074 	       hdev->io_capability);
3075 
3076 	hci_dev_unlock(hdev);
3077 
3078 	return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
3079 			    0);
3080 }
3081 
3082 static struct pending_cmd *find_pairing(struct hci_conn *conn)
3083 {
3084 	struct hci_dev *hdev = conn->hdev;
3085 	struct pending_cmd *cmd;
3086 
3087 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3088 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3089 			continue;
3090 
3091 		if (cmd->user_data != conn)
3092 			continue;
3093 
3094 		return cmd;
3095 	}
3096 
3097 	return NULL;
3098 }
3099 
3100 static void pairing_complete(struct pending_cmd *cmd, u8 status)
3101 {
3102 	struct mgmt_rp_pair_device rp;
3103 	struct hci_conn *conn = cmd->user_data;
3104 
3105 	bacpy(&rp.addr.bdaddr, &conn->dst);
3106 	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3107 
3108 	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
3109 		     &rp, sizeof(rp));
3110 
3111 	/* So we don't get further callbacks for this connection */
3112 	conn->connect_cfm_cb = NULL;
3113 	conn->security_cfm_cb = NULL;
3114 	conn->disconn_cfm_cb = NULL;
3115 
3116 	hci_conn_drop(conn);
3117 	hci_conn_put(conn);
3118 
3119 	mgmt_pending_remove(cmd);
3120 
3121 	/* The device is paired so there is no need to remove
3122 	 * its connection parameters anymore.
3123 	 */
3124 	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3125 }
3126 
3127 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3128 {
3129 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3130 	struct pending_cmd *cmd;
3131 
3132 	cmd = find_pairing(conn);
3133 	if (cmd)
3134 		cmd->cmd_complete(cmd, status);
3135 }
3136 
3137 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3138 {
3139 	struct pending_cmd *cmd;
3140 
3141 	BT_DBG("status %u", status);
3142 
3143 	cmd = find_pairing(conn);
3144 	if (!cmd)
3145 		BT_DBG("Unable to find a pending command");
3146 	else
3147 		cmd->cmd_complete(cmd, mgmt_status(status));
3148 }
3149 
3150 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3151 {
3152 	struct pending_cmd *cmd;
3153 
3154 	BT_DBG("status %u", status);
3155 
3156 	if (!status)
3157 		return;
3158 
3159 	cmd = find_pairing(conn);
3160 	if (!cmd)
3161 		BT_DBG("Unable to find a pending command");
3162 	else
3163 		cmd->cmd_complete(cmd, mgmt_status(status));
3164 }
3165 
/* Pair Device command handler (MGMT_OP_PAIR_DEVICE).
 *
 * Initiates dedicated bonding with the given BR/EDR or LE address.
 * The command completes asynchronously via the connection callbacks
 * (pairing_complete_cb / le_pairing_complete_cb) or via SMP through
 * mgmt_smp_complete().
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, HCI_LE_CONN_TIMEOUT,
				      HCI_ROLE_MASTER);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* Existing callbacks mean another pairing is already using this
	 * connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Reference dropped again in pairing_complete() */
	cmd->user_data = hci_conn_get(conn);

	/* Already connected and secure: complete right away */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3286 
/* Cancel Pair Device command handler (MGMT_OP_CANCEL_PAIR_DEVICE).
 *
 * Aborts the currently pending Pair Device command, provided its
 * target address matches the one supplied here.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* Refuse to cancel a pairing with a different peer address */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Fails the pending Pair Device command and frees cmd */
	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3328 
/* Common helper for user pairing responses (PIN code, user confirm and
 * passkey replies, positive and negative).
 *
 * For LE the response is handed to SMP and completed synchronously;
 * for BR/EDR the corresponding HCI command is sent and the mgmt
 * command stays pending until it completes.
 *
 * @mgmt_op:  mgmt opcode being answered
 * @hci_op:   HCI opcode to send for BR/EDR links
 * @passkey:  only used when hci_op is HCI_OP_USER_PASSKEY_REPLY
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	/* LE responses go through SMP rather than HCI */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3398 
3399 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3400 			      void *data, u16 len)
3401 {
3402 	struct mgmt_cp_pin_code_neg_reply *cp = data;
3403 
3404 	BT_DBG("");
3405 
3406 	return user_pairing_resp(sk, hdev, &cp->addr,
3407 				MGMT_OP_PIN_CODE_NEG_REPLY,
3408 				HCI_OP_PIN_CODE_NEG_REPLY, 0);
3409 }
3410 
3411 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3412 			      u16 len)
3413 {
3414 	struct mgmt_cp_user_confirm_reply *cp = data;
3415 
3416 	BT_DBG("");
3417 
3418 	if (len != sizeof(*cp))
3419 		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3420 				  MGMT_STATUS_INVALID_PARAMS);
3421 
3422 	return user_pairing_resp(sk, hdev, &cp->addr,
3423 				 MGMT_OP_USER_CONFIRM_REPLY,
3424 				 HCI_OP_USER_CONFIRM_REPLY, 0);
3425 }
3426 
3427 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3428 				  void *data, u16 len)
3429 {
3430 	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3431 
3432 	BT_DBG("");
3433 
3434 	return user_pairing_resp(sk, hdev, &cp->addr,
3435 				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3436 				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3437 }
3438 
3439 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3440 			      u16 len)
3441 {
3442 	struct mgmt_cp_user_passkey_reply *cp = data;
3443 
3444 	BT_DBG("");
3445 
3446 	return user_pairing_resp(sk, hdev, &cp->addr,
3447 				 MGMT_OP_USER_PASSKEY_REPLY,
3448 				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3449 }
3450 
3451 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3452 				  void *data, u16 len)
3453 {
3454 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3455 
3456 	BT_DBG("");
3457 
3458 	return user_pairing_resp(sk, hdev, &cp->addr,
3459 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3460 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3461 }
3462 
/* Queue an HCI Write Local Name command carrying the current
 * hdev->dev_name into the given request.
 */
static void update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
3472 
/* Request callback for the Set Local Name transaction: answer the
 * pending mgmt command according to the HCI status.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* No pending command means nobody is waiting for an answer. */
	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	/* Echo the requested name back on success, otherwise translate
	 * the HCI error into a mgmt status.
	 */
	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3500 
/* Handle the Set Local Name mgmt command: update short and complete
 * name, and push the new name to the controller when powered.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	/* The short name needs no controller interaction, so it can be
	 * stored unconditionally.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* While powered off only the stored name is updated; the
	 * controller picks it up on power on.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3569 
3570 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3571 			       void *data, u16 data_len)
3572 {
3573 	struct pending_cmd *cmd;
3574 	int err;
3575 
3576 	BT_DBG("%s", hdev->name);
3577 
3578 	hci_dev_lock(hdev);
3579 
3580 	if (!hdev_is_powered(hdev)) {
3581 		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3582 				 MGMT_STATUS_NOT_POWERED);
3583 		goto unlock;
3584 	}
3585 
3586 	if (!lmp_ssp_capable(hdev)) {
3587 		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3588 				 MGMT_STATUS_NOT_SUPPORTED);
3589 		goto unlock;
3590 	}
3591 
3592 	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3593 		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3594 				 MGMT_STATUS_BUSY);
3595 		goto unlock;
3596 	}
3597 
3598 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3599 	if (!cmd) {
3600 		err = -ENOMEM;
3601 		goto unlock;
3602 	}
3603 
3604 	if (bredr_sc_enabled(hdev))
3605 		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3606 				   0, NULL);
3607 	else
3608 		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3609 
3610 	if (err < 0)
3611 		mgmt_pending_remove(cmd);
3612 
3613 unlock:
3614 	hci_dev_unlock(hdev);
3615 	return err;
3616 }
3617 
/* Handle the Add Remote OOB Data mgmt command. Two parameter layouts
 * are accepted: the legacy one with only P-192 hash/randomizer and the
 * extended one that additionally carries the P-256 values.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	int err;

	BT_DBG("%s ", hdev->name);

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* Only BR/EDR addresses are accepted here. */
		if (cp->addr.type != BDADDR_BREDR) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_ADD_REMOTE_OOB_DATA,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_ADD_REMOTE_OOB_DATA,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* NOTE(review): after the BDADDR_BREDR check above this
		 * branch looks unreachable (addr.type cannot be an LE
		 * type here) -- confirm whether the LE case is meant to
		 * be supported or the check is leftover.
		 */
		if (bdaddr_type_is_le(cp->addr.type)) {
			rand192 = NULL;
			hash192 = NULL;
		} else {
			rand192 = cp->rand192;
			hash192 = cp->hash192;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      cp->hash256, cp->rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3690 
3691 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3692 				  void *data, u16 len)
3693 {
3694 	struct mgmt_cp_remove_remote_oob_data *cp = data;
3695 	u8 status;
3696 	int err;
3697 
3698 	BT_DBG("%s", hdev->name);
3699 
3700 	if (cp->addr.type != BDADDR_BREDR)
3701 		return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3702 				    MGMT_STATUS_INVALID_PARAMS,
3703 				    &cp->addr, sizeof(cp->addr));
3704 
3705 	hci_dev_lock(hdev);
3706 
3707 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3708 		hci_remote_oob_data_clear(hdev);
3709 		status = MGMT_STATUS_SUCCESS;
3710 		goto done;
3711 	}
3712 
3713 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3714 	if (err < 0)
3715 		status = MGMT_STATUS_INVALID_PARAMS;
3716 	else
3717 		status = MGMT_STATUS_SUCCESS;
3718 
3719 done:
3720 	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3721 			   status, &cp->addr, sizeof(cp->addr));
3722 
3723 	hci_dev_unlock(hdev);
3724 	return err;
3725 }
3726 
/* Append the HCI commands needed to start discovery of the configured
 * type to @req. Returns true on success; on failure returns false and
 * stores the mgmt error code in *status.
 */
static bool trigger_discovery(struct hci_request *req, u8 *status)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 own_addr_type;
	int err;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		*status = mgmt_bredr_support(hdev);
		if (*status)
			return false;

		/* An inquiry already in progress cannot be restarted. */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			*status = MGMT_STATUS_BUSY;
			return false;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		*status = mgmt_le_support(hdev);
		if (*status)
			return false;

		/* Interleaved discovery additionally requires BR/EDR to
		 * be enabled.
		 */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			*status = MGMT_STATUS_NOT_SUPPORTED;
			return false;
		}

		if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
			/* Don't let discovery abort an outgoing
			 * connection attempt that's using directed
			 * advertising.
			 */
			if (hci_conn_hash_lookup_state(hdev, LE_LINK,
						       BT_CONNECT)) {
				*status = MGMT_STATUS_REJECTED;
				return false;
			}

			disable_advertising(req);
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or unresolvable private address.
		 */
		err = hci_update_random_address(req, true, &own_addr_type);
		if (err < 0) {
			*status = MGMT_STATUS_FAILED;
			return false;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		*status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}
3823 
/* Request callback shared by Start Discovery and Start Service
 * Discovery: answer the pending command, update the discovery state
 * and, for LE based discovery, schedule the scan-disable timeout.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	unsigned long timeout;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* Either of the two start commands may have initiated this
	 * request.
	 */
	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (status) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_FINDING);

	/* LE scanning has to be stopped by a timer; BR/EDR inquiry ends
	 * on its own (timeout 0 means no timer is armed).
	 */
	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;
	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;
	case DISCOV_TYPE_BREDR:
		timeout = 0;
		break;
	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
		timeout = 0;
		break;
	}

	if (timeout)
		queue_delayed_work(hdev->workqueue,
				   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}
3872 
/* Handle the Start Discovery mgmt command: validate state, build the
 * discovery HCI request and run it asynchronously.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   MGMT_STATUS_NOT_POWERED,
				   &cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject while another discovery or a periodic inquiry runs. */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   MGMT_STATUS_BUSY, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;

	hci_req_init(&req, hdev);

	/* trigger_discovery() fills in the mgmt error on failure. */
	if (!trigger_discovery(&req, &status)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3938 
/* Completion handler for Start Service Discovery: respond with a
 * single byte of the stored command parameters, matching the error
 * paths in start_service_discovery() which reply with just cp->type.
 */
static void service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param, 1);
}
3943 
/* Handle the Start Service Discovery mgmt command: like Start
 * Discovery but with an RSSI threshold and an optional UUID filter
 * list appended to the command.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	/* Upper bound keeping sizeof(*cp) + uuid_count * 16 within u16. */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_NOT_POWERED,
				   &cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject while another discovery or a periodic inquiry runs. */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_BUSY, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		BT_ERR("service_discovery: too big uuid_count value %u",
		       uuid_count);
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	/* The UUID list must account exactly for the remaining bytes. */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		BT_ERR("service_discovery: expected %u bytes, got %u bytes",
		       expected_len, len);
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Owned copy of the filter list; freed by
		 * hci_discovery_filter_clear().
		 */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_START_SERVICE_DISCOVERY,
					   MGMT_STATUS_FAILED,
					   &cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_req_init(&req, hdev);

	/* trigger_discovery() fills in the mgmt error on failure. */
	if (!trigger_discovery(&req, &status)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4051 
/* Request callback for Stop Discovery: answer the pending command and
 * reset the discovery state on success.
 */
static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (!status)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hci_dev_unlock(hdev);
}
4071 
/* Handle the Stop Discovery mgmt command: validate that discovery of
 * the given type is active and queue the HCI commands to stop it.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the discovery in progress. */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
				   &mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4129 
4130 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4131 			u16 len)
4132 {
4133 	struct mgmt_cp_confirm_name *cp = data;
4134 	struct inquiry_entry *e;
4135 	int err;
4136 
4137 	BT_DBG("%s", hdev->name);
4138 
4139 	hci_dev_lock(hdev);
4140 
4141 	if (!hci_discovery_active(hdev)) {
4142 		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4143 				   MGMT_STATUS_FAILED, &cp->addr,
4144 				   sizeof(cp->addr));
4145 		goto failed;
4146 	}
4147 
4148 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4149 	if (!e) {
4150 		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4151 				   MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4152 				   sizeof(cp->addr));
4153 		goto failed;
4154 	}
4155 
4156 	if (cp->name_known) {
4157 		e->name_state = NAME_KNOWN;
4158 		list_del(&e->list);
4159 	} else {
4160 		e->name_state = NAME_NEEDED;
4161 		hci_inquiry_cache_update_resolve(hdev, e);
4162 	}
4163 
4164 	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
4165 			   sizeof(cp->addr));
4166 
4167 failed:
4168 	hci_dev_unlock(hdev);
4169 	return err;
4170 }
4171 
4172 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4173 			u16 len)
4174 {
4175 	struct mgmt_cp_block_device *cp = data;
4176 	u8 status;
4177 	int err;
4178 
4179 	BT_DBG("%s", hdev->name);
4180 
4181 	if (!bdaddr_type_is_valid(cp->addr.type))
4182 		return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4183 				    MGMT_STATUS_INVALID_PARAMS,
4184 				    &cp->addr, sizeof(cp->addr));
4185 
4186 	hci_dev_lock(hdev);
4187 
4188 	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4189 				  cp->addr.type);
4190 	if (err < 0) {
4191 		status = MGMT_STATUS_FAILED;
4192 		goto done;
4193 	}
4194 
4195 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4196 		   sk);
4197 	status = MGMT_STATUS_SUCCESS;
4198 
4199 done:
4200 	err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4201 			   &cp->addr, sizeof(cp->addr));
4202 
4203 	hci_dev_unlock(hdev);
4204 
4205 	return err;
4206 }
4207 
4208 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4209 			  u16 len)
4210 {
4211 	struct mgmt_cp_unblock_device *cp = data;
4212 	u8 status;
4213 	int err;
4214 
4215 	BT_DBG("%s", hdev->name);
4216 
4217 	if (!bdaddr_type_is_valid(cp->addr.type))
4218 		return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4219 				    MGMT_STATUS_INVALID_PARAMS,
4220 				    &cp->addr, sizeof(cp->addr));
4221 
4222 	hci_dev_lock(hdev);
4223 
4224 	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4225 				  cp->addr.type);
4226 	if (err < 0) {
4227 		status = MGMT_STATUS_INVALID_PARAMS;
4228 		goto done;
4229 	}
4230 
4231 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4232 		   sk);
4233 	status = MGMT_STATUS_SUCCESS;
4234 
4235 done:
4236 	err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4237 			   &cp->addr, sizeof(cp->addr));
4238 
4239 	hci_dev_unlock(hdev);
4240 
4241 	return err;
4242 }
4243 
4244 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4245 			 u16 len)
4246 {
4247 	struct mgmt_cp_set_device_id *cp = data;
4248 	struct hci_request req;
4249 	int err;
4250 	__u16 source;
4251 
4252 	BT_DBG("%s", hdev->name);
4253 
4254 	source = __le16_to_cpu(cp->source);
4255 
4256 	if (source > 0x0002)
4257 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4258 				  MGMT_STATUS_INVALID_PARAMS);
4259 
4260 	hci_dev_lock(hdev);
4261 
4262 	hdev->devid_source = source;
4263 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4264 	hdev->devid_product = __le16_to_cpu(cp->product);
4265 	hdev->devid_version = __le16_to_cpu(cp->version);
4266 
4267 	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4268 
4269 	hci_req_init(&req, hdev);
4270 	update_eir(&req);
4271 	hci_req_run(&req, NULL);
4272 
4273 	hci_dev_unlock(hdev);
4274 
4275 	return err;
4276 }
4277 
/* Request callback for Set Advertising: sync the HCI_ADVERTISING flag
 * with the controller state and answer all pending commands.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set Advertising command. */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the actual controller advertising state in the mgmt
	 * setting flag.
	 */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		set_bit(HCI_ADVERTISING, &hdev->dev_flags);
	else
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
4303 
/* Handle the Set Advertising mgmt command: toggle LE advertising,
 * either directly in the setting flags or via an HCI request when the
 * controller state actually has to change.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Broadcast New Settings only on an actual change. */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Reject while a conflicting request is still in flight. */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4383 
4384 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4385 			      void *data, u16 len)
4386 {
4387 	struct mgmt_cp_set_static_address *cp = data;
4388 	int err;
4389 
4390 	BT_DBG("%s", hdev->name);
4391 
4392 	if (!lmp_le_capable(hdev))
4393 		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4394 				  MGMT_STATUS_NOT_SUPPORTED);
4395 
4396 	if (hdev_is_powered(hdev))
4397 		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4398 				  MGMT_STATUS_REJECTED);
4399 
4400 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4401 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4402 			return cmd_status(sk, hdev->id,
4403 					  MGMT_OP_SET_STATIC_ADDRESS,
4404 					  MGMT_STATUS_INVALID_PARAMS);
4405 
4406 		/* Two most significant bits shall be set */
4407 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4408 			return cmd_status(sk, hdev->id,
4409 					  MGMT_OP_SET_STATIC_ADDRESS,
4410 					  MGMT_STATUS_INVALID_PARAMS);
4411 	}
4412 
4413 	hci_dev_lock(hdev);
4414 
4415 	bacpy(&hdev->static_addr, &cp->bdaddr);
4416 
4417 	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4418 
4419 	hci_dev_unlock(hdev);
4420 
4421 	return err;
4422 }
4423 
4424 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4425 			   void *data, u16 len)
4426 {
4427 	struct mgmt_cp_set_scan_params *cp = data;
4428 	__u16 interval, window;
4429 	int err;
4430 
4431 	BT_DBG("%s", hdev->name);
4432 
4433 	if (!lmp_le_capable(hdev))
4434 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4435 				  MGMT_STATUS_NOT_SUPPORTED);
4436 
4437 	interval = __le16_to_cpu(cp->interval);
4438 
4439 	if (interval < 0x0004 || interval > 0x4000)
4440 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4441 				  MGMT_STATUS_INVALID_PARAMS);
4442 
4443 	window = __le16_to_cpu(cp->window);
4444 
4445 	if (window < 0x0004 || window > 0x4000)
4446 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4447 				  MGMT_STATUS_INVALID_PARAMS);
4448 
4449 	if (window > interval)
4450 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4451 				  MGMT_STATUS_INVALID_PARAMS);
4452 
4453 	hci_dev_lock(hdev);
4454 
4455 	hdev->le_scan_interval = interval;
4456 	hdev->le_scan_window = window;
4457 
4458 	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4459 
4460 	/* If background scan is running, restart it so new parameters are
4461 	 * loaded.
4462 	 */
4463 	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4464 	    hdev->discovery.state == DISCOVERY_STOPPED) {
4465 		struct hci_request req;
4466 
4467 		hci_req_init(&req, hdev);
4468 
4469 		hci_req_add_le_scan_disable(&req);
4470 		hci_req_add_le_passive_scan(&req);
4471 
4472 		hci_req_run(&req, NULL);
4473 	}
4474 
4475 	hci_dev_unlock(hdev);
4476 
4477 	return err;
4478 }
4479 
/* Request callback for Set Fast Connectable: update the setting flag
 * and answer the pending mgmt command according to the HCI status.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		struct mgmt_mode *cp = cmd->param;

		/* Sync the flag with the requested mode and broadcast
		 * the new settings.
		 */
		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4512 
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE: request the controller's
 * fast-connectable page scan mode. Requires BR/EDR enabled on a
 * controller of at least Bluetooth 1.2, powered and connectable.
 * The HCI_FAST_CONNECTABLE flag is updated in the request callback.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	/* Fast connectable is only available for BR/EDR from 1.2 on */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* Only boolean values are accepted */
	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	/* Page scan parameters only matter while connectable */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Only one Set Fast Connectable may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Requested mode already active: just reply with the settings */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4577 
/* Request callback for Set BR/EDR: on failure roll back the
 * optimistically-set HCI_BREDR_ENABLED flag (set_bredr() flips it
 * before running the request) and report the error; on success send
 * the new settings to the requester and other mgmt sockets.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4609 
/* Handler for MGMT_OP_SET_BREDR: enable or disable BR/EDR support on a
 * dual-mode (BR/EDR + LE) controller. Disabling is only allowed while
 * powered off; enabling while powered on triggers an HCI request whose
 * outcome is handled in set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Toggling BR/EDR only makes sense on a dual-mode controller */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* LE must stay enabled; refusing here avoids ending up with
	 * neither transport active.
	 */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: reply with current settings */
	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* While powered off only flags are touched; clearing
		 * BR/EDR also clears every BR/EDR-dependent setting.
		 */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, false);
	hci_update_page_scan(hdev, &req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4699 
/* Handler for MGMT_OP_SET_SECURE_CONN: configure Secure Connections
 * support. val may be 0x00 (off), 0x01 (on) or 0x02 (SC-only mode).
 * When the controller cannot act on it (powered off, no SC support, or
 * BR/EDR disabled) only the flags are updated; otherwise the setting is
 * written to the controller via HCI_OP_WRITE_SC_SUPPORT.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Allowed when LE is enabled, or when the controller (or the
	 * force-SC debugfs override) supports Secure Connections.
	 */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !lmp_sc_capable(hdev) && !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Flag-only update when the controller can't be told right now */
	if (!hdev_is_powered(hdev) ||
	    (!lmp_sc_capable(hdev) &&
	     !test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) ||
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Both the enable flag and the SC-only flag already match */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* NOTE(review): HCI_SC_ONLY is flipped here, before the HCI
	 * command completes; confirm no rollback is needed on failure.
	 */
	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4785 
/* Handler for MGMT_OP_SET_DEBUG_KEYS: val 0x00 discards debug keys,
 * 0x01 keeps them stored, 0x02 additionally makes the controller use
 * SSP debug mode. Only mode 0x02 requires talking to the controller.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Any non-zero value means debug keys are kept in storage */
	if (cp->val)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	/* Only 0x02 turns on active use of debug keys */
	if (cp->val == 0x02)
		use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
						&hdev->dev_flags);
	else
		use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
						 &hdev->dev_flags);

	/* Sync SSP debug mode to the controller when it can take effect */
	if (hdev_is_powered(hdev) && use_changed &&
	    test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4833 
/* Handler for MGMT_OP_SET_PRIVACY: enable or disable LE privacy (use
 * of resolvable private addresses) and install the local IRK. Only
 * allowed while the adapter is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_INVALID_PARAMS);

	/* Privacy may only be reconfigured while powered off */
	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	if (cp->privacy) {
		changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA on next use */
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4883 
4884 static bool irk_is_valid(struct mgmt_irk_info *irk)
4885 {
4886 	switch (irk->addr.type) {
4887 	case BDADDR_LE_PUBLIC:
4888 		return true;
4889 
4890 	case BDADDR_LE_RANDOM:
4891 		/* Two most significant bits shall be set */
4892 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4893 			return false;
4894 		return true;
4895 	}
4896 
4897 	return false;
4898 }
4899 
/* Handler for MGMT_OP_LOAD_IRKS: replace the kernel's list of Identity
 * Resolving Keys with the list provided by user space. All entries are
 * validated before any existing key is discarded.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps sizeof(*cp) + count * entry <= U16_MAX */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Validate everything up front so the operation is all-or-nothing */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_IRKS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type;

		if (irk->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
			    BDADDR_ANY);
	}

	/* Providing IRKs implies user space handles RPA resolution */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
4966 
4967 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4968 {
4969 	if (key->master != 0x00 && key->master != 0x01)
4970 		return false;
4971 
4972 	switch (key->addr.type) {
4973 	case BDADDR_LE_PUBLIC:
4974 		return true;
4975 
4976 	case BDADDR_LE_RANDOM:
4977 		/* Two most significant bits shall be set */
4978 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4979 			return false;
4980 		return true;
4981 	}
4982 
4983 	return false;
4984 }
4985 
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS: replace the kernel's stored
 * LE Long Term Keys with the list supplied by user space. Entries are
 * validated first; the existing list is then cleared and rebuilt.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps sizeof(*cp) + count * entry <= U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Validate everything up front so the operation is all-or-nothing */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_LONG_TERM_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type, authenticated;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* NOTE(review): no break here, so control falls
			 * through to default and the key is skipped; the
			 * two assignments above are dead. Confirm whether
			 * P256 debug keys are meant to be ignored, and if
			 * so mark the fall-through explicitly.
			 */
		default:
			/* Unknown key types are silently skipped */
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
			    authenticated, key->val, key->enc_size, key->ediv,
			    key->rand);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5077 
/* Completion helper for Get Connection Information: build the reply
 * from the connection's cached RSSI/TX power values (or the invalid
 * markers on failure) and release the connection references taken when
 * the command was queued.
 */
static void conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;

	/* cmd->param holds the original command, which starts with the
	 * address the reply must echo back.
	 */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
		     &rp, sizeof(rp));

	/* Drop the hold and reference taken in get_conn_info() */
	hci_conn_drop(conn);
	hci_conn_put(conn);
}
5101 
/* Request callback for the RSSI/TX-power refresh issued by
 * get_conn_info(): recover the connection handle from the last sent
 * command and complete the matching pending mgmt command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status)
{
	struct hci_cp_read_rssi *cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in conn_info response", handle);
		goto unlock;
	}

	/* Match the pending command by the connection it was queued for */
	cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5153 
/* Handler for MGMT_OP_GET_CONN_INFO: report RSSI and TX power for an
 * existing connection. Cached values are returned directly when still
 * fresh; otherwise an HCI request refreshes them and the reply is sent
 * from conn_info_cmd_complete() once it finishes.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* Pre-fill the reply with the queried address for all responses */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto unlock;
	}

	/* Only one query per connection may be in flight at a time */
	if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		/* Read RSSI must be first; the completion handler relies
		 * on that ordering to pick the right status.
		 */
		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive until the reply is sent */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5272 
5273 static void clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5274 {
5275 	struct hci_conn *conn = cmd->user_data;
5276 	struct mgmt_rp_get_clock_info rp;
5277 	struct hci_dev *hdev;
5278 
5279 	memset(&rp, 0, sizeof(rp));
5280 	memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5281 
5282 	if (status)
5283 		goto complete;
5284 
5285 	hdev = hci_dev_get(cmd->index);
5286 	if (hdev) {
5287 		rp.local_clock = cpu_to_le32(hdev->clock);
5288 		hci_dev_put(hdev);
5289 	}
5290 
5291 	if (conn) {
5292 		rp.piconet_clock = cpu_to_le32(conn->clock);
5293 		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5294 	}
5295 
5296 complete:
5297 	cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp, sizeof(rp));
5298 
5299 	if (conn) {
5300 		hci_conn_drop(conn);
5301 		hci_conn_put(conn);
5302 	}
5303 }
5304 
/* Request callback for Get Clock Information: determine whether the
 * last Read Clock command targeted a connection (which == piconet
 * clock) and complete the matching pending mgmt command.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_read_clock *hci_cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which != 0 means the piconet clock of a specific connection
	 * was read; look that connection up by its handle.
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5336 
/* Handler for MGMT_OP_GET_CLOCK_INFO: read the local clock and, when a
 * BR/EDR connection address is given, the piconet clock of that
 * connection. The reply is sent from clock_info_cmd_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	/* Pre-fill the reply with the queried address for error paths */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information only exists for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	/* A non-BDADDR_ANY address selects a specific connection whose
	 * piconet clock is read in addition to the local clock.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_GET_CLOCK_INFO,
					   MGMT_STATUS_NOT_CONNECTED,
					   &rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First Read Clock: which = 0x00 reads the local clock */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* Keep the connection alive until the reply is sent */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5411 
5412 static void device_added(struct sock *sk, struct hci_dev *hdev,
5413 			 bdaddr_t *bdaddr, u8 type, u8 action)
5414 {
5415 	struct mgmt_ev_device_added ev;
5416 
5417 	bacpy(&ev.addr.bdaddr, bdaddr);
5418 	ev.addr.type = type;
5419 	ev.action = action;
5420 
5421 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5422 }
5423 
/* Handler for MGMT_OP_ADD_DEVICE: register a device for background
 * actions. For BR/EDR only action 0x01 (allow incoming connection via
 * the whitelist) is supported; for LE the action selects the
 * auto-connect policy stored in the connection parameters.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		hci_update_page_scan(hdev, NULL);

		goto added;
	}

	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	/* 0x02 = auto-connect, 0x01 = direct connect, 0x00 = report only */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				   MGMT_STATUS_FAILED,
				   &cp->addr, sizeof(cp->addr));
		goto unlock;
	}

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5498 
5499 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5500 			   bdaddr_t *bdaddr, u8 type)
5501 {
5502 	struct mgmt_ev_device_removed ev;
5503 
5504 	bacpy(&ev.addr.bdaddr, bdaddr);
5505 	ev.addr.type = type;
5506 
5507 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5508 }
5509 
/* Handler for MGMT_OP_REMOVE_DEVICE: undo a previous Add Device. A
 * specific address removes that entry (whitelist for BR/EDR, conn
 * params for LE); BDADDR_ANY with type 0 removes all whitelist entries
 * and all LE connection parameters added via Add Device.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = cmd_complete(sk, hdev->id,
						   MGMT_OP_REMOVE_DEVICE,
						   MGMT_STATUS_INVALID_PARAMS,
						   &cp->addr, sizeof(cp->addr));
				goto unlock;
			}

			hci_update_page_scan(hdev, NULL);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Disabled entries were not added via Add Device, so
		 * they may not be removed through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Wildcard removal requires address type 0 */
		if (cp->addr.type) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_page_scan(hdev, NULL);

		/* Disabled entries are kept: they were not user-added */
		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5618 
/* Handle the Load Connection Parameters mgmt command.
 *
 * Replaces the stored LE connection parameters (clearing disabled
 * entries first) with the list supplied by userspace. Individual
 * entries with an invalid address type or out-of-range parameters are
 * skipped rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound that keeps expected_len below from overflowing u16 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must exactly match the payload length */
	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		/* Only LE address types make sense here */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
}
5703 
5704 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
5705 			       void *data, u16 len)
5706 {
5707 	struct mgmt_cp_set_external_config *cp = data;
5708 	bool changed;
5709 	int err;
5710 
5711 	BT_DBG("%s", hdev->name);
5712 
5713 	if (hdev_is_powered(hdev))
5714 		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5715 				  MGMT_STATUS_REJECTED);
5716 
5717 	if (cp->config != 0x00 && cp->config != 0x01)
5718 		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5719 				    MGMT_STATUS_INVALID_PARAMS);
5720 
5721 	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
5722 		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5723 				  MGMT_STATUS_NOT_SUPPORTED);
5724 
5725 	hci_dev_lock(hdev);
5726 
5727 	if (cp->config)
5728 		changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
5729 					    &hdev->dev_flags);
5730 	else
5731 		changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
5732 					     &hdev->dev_flags);
5733 
5734 	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
5735 	if (err < 0)
5736 		goto unlock;
5737 
5738 	if (!changed)
5739 		goto unlock;
5740 
5741 	err = new_options(hdev, sk);
5742 
5743 	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
5744 		mgmt_index_removed(hdev);
5745 
5746 		if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5747 			set_bit(HCI_CONFIG, &hdev->dev_flags);
5748 			set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5749 
5750 			queue_work(hdev->req_workqueue, &hdev->power_on);
5751 		} else {
5752 			set_bit(HCI_RAW, &hdev->flags);
5753 			mgmt_index_added(hdev);
5754 		}
5755 	}
5756 
5757 unlock:
5758 	hci_dev_unlock(hdev);
5759 	return err;
5760 }
5761 
/* Handle the Set Public Address mgmt command.
 *
 * Stores a new public address for controllers that provide a
 * set_bdaddr driver callback. Only valid while powered off and for a
 * non-zero address. If the address actually changed and the controller
 * is now fully configured, the index is restarted so the new address
 * takes effect.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
		err = new_options(hdev, sk);

	/* Setting the address may have completed configuration; if so,
	 * re-register the index and power on for the config stage.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		set_bit(HCI_CONFIG, &hdev->dev_flags);
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5813 
5814 static const struct mgmt_handler {
5815 	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
5816 		     u16 data_len);
5817 	bool var_len;
5818 	size_t data_len;
5819 } mgmt_handlers[] = {
5820 	{ NULL }, /* 0x0000 (no command) */
5821 	{ read_version,           false, MGMT_READ_VERSION_SIZE },
5822 	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
5823 	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
5824 	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
5825 	{ set_powered,            false, MGMT_SETTING_SIZE },
5826 	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
5827 	{ set_connectable,        false, MGMT_SETTING_SIZE },
5828 	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
5829 	{ set_bondable,           false, MGMT_SETTING_SIZE },
5830 	{ set_link_security,      false, MGMT_SETTING_SIZE },
5831 	{ set_ssp,                false, MGMT_SETTING_SIZE },
5832 	{ set_hs,                 false, MGMT_SETTING_SIZE },
5833 	{ set_le,                 false, MGMT_SETTING_SIZE },
5834 	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
5835 	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
5836 	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
5837 	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
5838 	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
5839 	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
5840 	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
5841 	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
5842 	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
5843 	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
5844 	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
5845 	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
5846 	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
5847 	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
5848 	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
5849 	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
5850 	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
5851 	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
5852 	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
5853 	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
5854 	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
5855 	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
5856 	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
5857 	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
5858 	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
5859 	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
5860 	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
5861 	{ set_advertising,        false, MGMT_SETTING_SIZE },
5862 	{ set_bredr,              false, MGMT_SETTING_SIZE },
5863 	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
5864 	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
5865 	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
5866 	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
5867 	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
5868 	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
5869 	{ get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },
5870 	{ get_clock_info,         false, MGMT_GET_CLOCK_INFO_SIZE },
5871 	{ add_device,             false, MGMT_ADD_DEVICE_SIZE },
5872 	{ remove_device,          false, MGMT_REMOVE_DEVICE_SIZE },
5873 	{ load_conn_param,        true,  MGMT_LOAD_CONN_PARAM_SIZE },
5874 	{ read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
5875 	{ read_config_info,       false, MGMT_READ_CONFIG_INFO_SIZE },
5876 	{ set_external_config,    false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
5877 	{ set_public_address,     false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
5878 	{ start_service_discovery,true,  MGMT_START_SERVICE_DISCOVERY_SIZE },
5879 };
5880 
/* Entry point for mgmt commands arriving on an HCI control socket.
 *
 * Copies the message into kernel memory, validates the mgmt header,
 * resolves the controller index, applies per-state and per-opcode
 * restrictions, and finally dispatches to the handler from the
 * mgmt_handlers table.
 *
 * Returns the number of consumed bytes on success or a negative errno.
 * Handler-level mgmt errors are reported to the socket via cmd_status
 * replies, not via the return value.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* Header-declared length must match the actual payload */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers in setup, config or user-channel state
		 * are not visible through the mgmt interface.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured controllers only accept the small set
		 * of configuration-related commands.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    opcode != MGMT_OP_READ_CONFIG_INFO &&
		    opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
		    opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Global commands must not specify a controller index ... */
	if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
		     opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* ... and per-controller commands require one */
	if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
		      opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
5990 
5991 void mgmt_index_added(struct hci_dev *hdev)
5992 {
5993 	if (hdev->dev_type != HCI_BREDR)
5994 		return;
5995 
5996 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5997 		return;
5998 
5999 	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6000 		mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
6001 	else
6002 		mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
6003 }
6004 
6005 void mgmt_index_removed(struct hci_dev *hdev)
6006 {
6007 	u8 status = MGMT_STATUS_INVALID_INDEX;
6008 
6009 	if (hdev->dev_type != HCI_BREDR)
6010 		return;
6011 
6012 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6013 		return;
6014 
6015 	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6016 
6017 	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6018 		mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
6019 	else
6020 		mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
6021 }
6022 
6023 /* This function requires the caller holds hdev->lock */
6024 static void restart_le_actions(struct hci_dev *hdev)
6025 {
6026 	struct hci_conn_params *p;
6027 
6028 	list_for_each_entry(p, &hdev->le_conn_params, list) {
6029 		/* Needed for AUTO_OFF case where might not "really"
6030 		 * have been powered off.
6031 		 */
6032 		list_del_init(&p->action);
6033 
6034 		switch (p->auto_connect) {
6035 		case HCI_AUTO_CONN_DIRECT:
6036 		case HCI_AUTO_CONN_ALWAYS:
6037 			list_add(&p->action, &hdev->pend_le_conns);
6038 			break;
6039 		case HCI_AUTO_CONN_REPORT:
6040 			list_add(&p->action, &hdev->pend_le_reports);
6041 			break;
6042 		default:
6043 			break;
6044 		}
6045 	}
6046 
6047 	hci_update_background_scan(hdev);
6048 }
6049 
/* Completion callback for the HCI request built by powered_update_hci().
 * Re-arms LE auto-connect actions, responds to pending Set Powered
 * commands and emits New Settings.
 */
static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	restart_le_actions(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* match.sk appears to carry a socket reference taken by
	 * settings_rsp -- confirm; drop it here.
	 */
	if (match.sk)
		sock_put(match.sk);
}
6069 
/* Build and run the HCI request that brings the controller's state in
 * line with the mgmt settings after power on: SSP mode, LE host
 * support, advertising data, authentication and BR/EDR parameters.
 *
 * Returns the result of hci_req_run(); powered_complete() runs when
 * the request finishes.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Only write authentication enable if it differs from the
	 * current controller state.
	 */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		write_fast_connectable(&req, false);
		hci_update_page_scan(hdev, &req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
6129 
/* Notify mgmt of a controller power state change.
 *
 * On power on, first tries to sync controller state via
 * powered_update_hci(); if that request was queued (returns 0) the
 * notifications are deferred to powered_complete(). On power off (or
 * if no request could be queued) pending commands are answered and a
 * New Settings event is sent directly.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	/* Everything else pending fails with Not Powered */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status_not_powered);

	/* Report the class of device reset on power off */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
6164 
6165 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6166 {
6167 	struct pending_cmd *cmd;
6168 	u8 status;
6169 
6170 	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6171 	if (!cmd)
6172 		return;
6173 
6174 	if (err == -ERFKILL)
6175 		status = MGMT_STATUS_RFKILLED;
6176 	else
6177 		status = MGMT_STATUS_FAILED;
6178 
6179 	cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6180 
6181 	mgmt_pending_remove(cmd);
6182 }
6183 
/* Timer callback for the discoverable timeout: clears the
 * discoverable flags, restores page-scan-only mode on BR/EDR, updates
 * class/advertising data and announces the new settings.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
6214 
6215 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6216 		       bool persistent)
6217 {
6218 	struct mgmt_ev_new_link_key ev;
6219 
6220 	memset(&ev, 0, sizeof(ev));
6221 
6222 	ev.store_hint = persistent;
6223 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6224 	ev.key.addr.type = BDADDR_BREDR;
6225 	ev.key.type = key->type;
6226 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6227 	ev.key.pin_len = key->pin_len;
6228 
6229 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
6230 }
6231 
6232 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6233 {
6234 	switch (ltk->type) {
6235 	case SMP_LTK:
6236 	case SMP_LTK_SLAVE:
6237 		if (ltk->authenticated)
6238 			return MGMT_LTK_AUTHENTICATED;
6239 		return MGMT_LTK_UNAUTHENTICATED;
6240 	case SMP_LTK_P256:
6241 		if (ltk->authenticated)
6242 			return MGMT_LTK_P256_AUTH;
6243 		return MGMT_LTK_P256_UNAUTH;
6244 	case SMP_LTK_P256_DEBUG:
6245 		return MGMT_LTK_P256_DEBUG;
6246 	}
6247 
6248 	return MGMT_LTK_UNAUTHENTICATED;
6249 }
6250 
6251 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6252 {
6253 	struct mgmt_ev_new_long_term_key ev;
6254 
6255 	memset(&ev, 0, sizeof(ev));
6256 
6257 	/* Devices using resolvable or non-resolvable random addresses
6258 	 * without providing an indentity resolving key don't require
6259 	 * to store long term keys. Their addresses will change the
6260 	 * next time around.
6261 	 *
6262 	 * Only when a remote device provides an identity address
6263 	 * make sure the long term key is stored. If the remote
6264 	 * identity is known, the long term keys are internally
6265 	 * mapped to the identity address. So allow static random
6266 	 * and public addresses here.
6267 	 */
6268 	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6269 	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
6270 		ev.store_hint = 0x00;
6271 	else
6272 		ev.store_hint = persistent;
6273 
6274 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6275 	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6276 	ev.key.type = mgmt_ltk_type(key);
6277 	ev.key.enc_size = key->enc_size;
6278 	ev.key.ediv = key->ediv;
6279 	ev.key.rand = key->rand;
6280 
6281 	if (key->type == SMP_LTK)
6282 		ev.key.master = 1;
6283 
6284 	memcpy(ev.key.val, key->val, sizeof(key->val));
6285 
6286 	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
6287 }
6288 
6289 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6290 {
6291 	struct mgmt_ev_new_irk ev;
6292 
6293 	memset(&ev, 0, sizeof(ev));
6294 
6295 	/* For identity resolving keys from devices that are already
6296 	 * using a public address or static random address, do not
6297 	 * ask for storing this key. The identity resolving key really
6298 	 * is only mandatory for devices using resovlable random
6299 	 * addresses.
6300 	 *
6301 	 * Storing all identity resolving keys has the downside that
6302 	 * they will be also loaded on next boot of they system. More
6303 	 * identity resolving keys, means more time during scanning is
6304 	 * needed to actually resolve these addresses.
6305 	 */
6306 	if (bacmp(&irk->rpa, BDADDR_ANY))
6307 		ev.store_hint = 0x01;
6308 	else
6309 		ev.store_hint = 0x00;
6310 
6311 	bacpy(&ev.rpa, &irk->rpa);
6312 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6313 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6314 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6315 
6316 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
6317 }
6318 
6319 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6320 		   bool persistent)
6321 {
6322 	struct mgmt_ev_new_csrk ev;
6323 
6324 	memset(&ev, 0, sizeof(ev));
6325 
6326 	/* Devices using resolvable or non-resolvable random addresses
6327 	 * without providing an indentity resolving key don't require
6328 	 * to store signature resolving keys. Their addresses will change
6329 	 * the next time around.
6330 	 *
6331 	 * Only when a remote device provides an identity address
6332 	 * make sure the signature resolving key is stored. So allow
6333 	 * static random and public addresses here.
6334 	 */
6335 	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6336 	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6337 		ev.store_hint = 0x00;
6338 	else
6339 		ev.store_hint = persistent;
6340 
6341 	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6342 	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6343 	ev.key.master = csrk->master;
6344 	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6345 
6346 	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
6347 }
6348 
6349 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6350 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6351 			 u16 max_interval, u16 latency, u16 timeout)
6352 {
6353 	struct mgmt_ev_new_conn_param ev;
6354 
6355 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
6356 		return;
6357 
6358 	memset(&ev, 0, sizeof(ev));
6359 	bacpy(&ev.addr.bdaddr, bdaddr);
6360 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6361 	ev.store_hint = store_hint;
6362 	ev.min_interval = cpu_to_le16(min_interval);
6363 	ev.max_interval = cpu_to_le16(max_interval);
6364 	ev.latency = cpu_to_le16(latency);
6365 	ev.timeout = cpu_to_le16(timeout);
6366 
6367 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
6368 }
6369 
6370 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6371 				  u8 data_len)
6372 {
6373 	eir[eir_len++] = sizeof(type) + data_len;
6374 	eir[eir_len++] = type;
6375 	memcpy(&eir[eir_len], data, data_len);
6376 	eir_len += data_len;
6377 
6378 	return eir_len;
6379 }
6380 
/* Emit a Device Connected event, attaching either the connection's LE
 * advertising data or locally assembled EIR fields (name, class of
 * device) as the event's EIR payload.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	/* NOTE(review): eir data is copied into this fixed buffer;
	 * presumably le_adv_data_len and name_len are bounded well
	 * below 512 - sizeof(*ev) -- confirm against the callers.
	 */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only include the class of device if it is non-zero */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
6417 
6418 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6419 {
6420 	struct sock **sk = data;
6421 
6422 	cmd->cmd_complete(cmd, 0);
6423 
6424 	*sk = cmd->sk;
6425 	sock_hold(*sk);
6426 
6427 	mgmt_pending_remove(cmd);
6428 }
6429 
6430 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6431 {
6432 	struct hci_dev *hdev = data;
6433 	struct mgmt_cp_unpair_device *cp = cmd->param;
6434 
6435 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6436 
6437 	cmd->cmd_complete(cmd, 0);
6438 	mgmt_pending_remove(cmd);
6439 }
6440 
6441 bool mgmt_powering_down(struct hci_dev *hdev)
6442 {
6443 	struct pending_cmd *cmd;
6444 	struct mgmt_mode *cp;
6445 
6446 	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6447 	if (!cmd)
6448 		return false;
6449 
6450 	cp = cmd->param;
6451 	if (!cp->val)
6452 		return true;
6453 
6454 	return false;
6455 }
6456 
/* Notify mgmt of a disconnected device: fast-track a pending power off
 * when this was the last connection, answer any pending Disconnect
 * command, emit Device Disconnected and complete pending Unpair Device
 * commands.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only announce connections that were visible to mgmt */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp holds a reference on the responding socket
	 * so the event below can be sent skipping it.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
6492 
6493 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6494 			    u8 link_type, u8 addr_type, u8 status)
6495 {
6496 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6497 	struct mgmt_cp_disconnect *cp;
6498 	struct pending_cmd *cmd;
6499 
6500 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6501 			     hdev);
6502 
6503 	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6504 	if (!cmd)
6505 		return;
6506 
6507 	cp = cmd->param;
6508 
6509 	if (bacmp(bdaddr, &cp->addr.bdaddr))
6510 		return;
6511 
6512 	if (cp->addr.type != bdaddr_type)
6513 		return;
6514 
6515 	cmd->cmd_complete(cmd, mgmt_status(status));
6516 	mgmt_pending_remove(cmd);
6517 }
6518 
6519 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6520 			 u8 addr_type, u8 status)
6521 {
6522 	struct mgmt_ev_connect_failed ev;
6523 
6524 	/* The connection is still in hci_conn_hash so test for 1
6525 	 * instead of 0 to know if this is the last one.
6526 	 */
6527 	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6528 		cancel_delayed_work(&hdev->power_off);
6529 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
6530 	}
6531 
6532 	bacpy(&ev.addr.bdaddr, bdaddr);
6533 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
6534 	ev.status = mgmt_status(status);
6535 
6536 	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
6537 }
6538 
6539 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6540 {
6541 	struct mgmt_ev_pin_code_request ev;
6542 
6543 	bacpy(&ev.addr.bdaddr, bdaddr);
6544 	ev.addr.type = BDADDR_BREDR;
6545 	ev.secure = secure;
6546 
6547 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
6548 }
6549 
6550 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6551 				  u8 status)
6552 {
6553 	struct pending_cmd *cmd;
6554 
6555 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6556 	if (!cmd)
6557 		return;
6558 
6559 	cmd->cmd_complete(cmd, mgmt_status(status));
6560 	mgmt_pending_remove(cmd);
6561 }
6562 
6563 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6564 				      u8 status)
6565 {
6566 	struct pending_cmd *cmd;
6567 
6568 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6569 	if (!cmd)
6570 		return;
6571 
6572 	cmd->cmd_complete(cmd, mgmt_status(status));
6573 	mgmt_pending_remove(cmd);
6574 }
6575 
6576 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6577 			      u8 link_type, u8 addr_type, u32 value,
6578 			      u8 confirm_hint)
6579 {
6580 	struct mgmt_ev_user_confirm_request ev;
6581 
6582 	BT_DBG("%s", hdev->name);
6583 
6584 	bacpy(&ev.addr.bdaddr, bdaddr);
6585 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
6586 	ev.confirm_hint = confirm_hint;
6587 	ev.value = cpu_to_le32(value);
6588 
6589 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
6590 			  NULL);
6591 }
6592 
6593 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6594 			      u8 link_type, u8 addr_type)
6595 {
6596 	struct mgmt_ev_user_passkey_request ev;
6597 
6598 	BT_DBG("%s", hdev->name);
6599 
6600 	bacpy(&ev.addr.bdaddr, bdaddr);
6601 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
6602 
6603 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
6604 			  NULL);
6605 }
6606 
6607 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6608 				      u8 link_type, u8 addr_type, u8 status,
6609 				      u8 opcode)
6610 {
6611 	struct pending_cmd *cmd;
6612 
6613 	cmd = mgmt_pending_find(opcode, hdev);
6614 	if (!cmd)
6615 		return -ENOENT;
6616 
6617 	cmd->cmd_complete(cmd, mgmt_status(status));
6618 	mgmt_pending_remove(cmd);
6619 
6620 	return 0;
6621 }
6622 
/* Complete a pending User Confirm reply with the given HCI status. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
6629 
/* Complete a pending User Confirm negative reply with the given status. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
6637 
/* Complete a pending User Passkey reply with the given HCI status. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
6644 
/* Complete a pending User Passkey negative reply with the given status. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
6652 
6653 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6654 			     u8 link_type, u8 addr_type, u32 passkey,
6655 			     u8 entered)
6656 {
6657 	struct mgmt_ev_passkey_notify ev;
6658 
6659 	BT_DBG("%s", hdev->name);
6660 
6661 	bacpy(&ev.addr.bdaddr, bdaddr);
6662 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
6663 	ev.passkey = __cpu_to_le32(passkey);
6664 	ev.entered = entered;
6665 
6666 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
6667 }
6668 
/* Report a failed authentication for @conn to userspace. If there is a
 * pending pairing command for this connection, its socket is passed as
 * the skip socket for the event and the command itself is completed via
 * pairing_complete() afterwards.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);	/* HCI -> mgmt status code */

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Send the event first; pairing_complete() may tear down the
	 * pending command state.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd)
		pairing_complete(cmd, status);
}
6687 
/* Completion handler for enabling/disabling link level authentication.
 * Mirrors the controller's HCI_AUTH flag into the HCI_LINK_SECURITY
 * dev_flag and answers any pending Set Link Security commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		/* On failure just fail all pending commands. */
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Sync the setting with the controller state; "changed" is true
	 * only if the dev_flag actually flipped.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the first socket it saw. */
	if (match.sk)
		sock_put(match.sk);
}
6716 
6717 static void clear_eir(struct hci_request *req)
6718 {
6719 	struct hci_dev *hdev = req->hdev;
6720 	struct hci_cp_write_eir cp;
6721 
6722 	if (!lmp_ext_inq_capable(hdev))
6723 		return;
6724 
6725 	memset(hdev->eir, 0, sizeof(hdev->eir));
6726 
6727 	memset(&cp, 0, sizeof(cp));
6728 
6729 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
6730 }
6731 
/* Completion handler for enabling/disabling Simple Secure Pairing.
 * Updates HCI_SSP_ENABLED (and the dependent HCI_HS_ENABLED flag),
 * answers pending Set SSP commands and refreshes the EIR data.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* The enable attempt failed: roll back the flags that
		 * were set optimistically and announce the rollback.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling SSP also disables High Speed; report a
		 * change if either flag actually flipped.
		 */
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	/* With SSP on, update the EIR (and SSP debug mode if in use);
	 * with SSP off, blank the EIR entirely.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
6784 
/* Completion handler for enabling/disabling Secure Connections.
 * Keeps HCI_SC_ENABLED/HCI_SC_ONLY consistent and answers pending
 * Set Secure Connections commands.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enable failed: undo the optimistically set flags and
		 * announce the rollback if SC_ENABLED was actually set.
		 */
		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		/* SC-only mode cannot survive disabling SC. */
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6821 
6822 static void sk_lookup(struct pending_cmd *cmd, void *data)
6823 {
6824 	struct cmd_lookup *match = data;
6825 
6826 	if (match->sk == NULL) {
6827 		match->sk = cmd->sk;
6828 		sock_hold(match->sk);
6829 	}
6830 }
6831 
6832 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
6833 				    u8 status)
6834 {
6835 	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
6836 
6837 	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
6838 	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
6839 	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
6840 
6841 	if (!status)
6842 		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
6843 			   NULL);
6844 
6845 	if (match.sk)
6846 		sock_put(match.sk);
6847 }
6848 
/* Completion handler for a local name update on the controller. Emits
 * a Local Name Changed event unless the write happened as part of the
 * power-on sequence.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	/* On failure keep the old name and stay silent. */
	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No mgmt command pending, so the change did not come
		 * through mgmt; store the name directly.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the initiator's socket; it gets a command reply instead. */
	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
6875 
/* Completion handler for Read Local OOB Data. Replies to the pending
 * mgmt command with either the extended (192+256 bit) or the legacy
 * (192 bit only) OOB data layout.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *rand192, u8 *hash256, u8 *rand256,
				       u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		/* Use the extended reply only when BR/EDR Secure
		 * Connections is enabled and the controller actually
		 * produced 256-bit values.
		 */
		if (bredr_sc_enabled(hdev) && hash256 && rand256) {
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.rand192, rand192, sizeof(rp.rand192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.rand256, rand256, sizeof(rp.rand256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.rand, rand192, sizeof(rp.rand));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
6918 
6919 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
6920 {
6921 	int i;
6922 
6923 	for (i = 0; i < uuid_count; i++) {
6924 		if (!memcmp(uuid, uuids[i], 16))
6925 			return true;
6926 	}
6927 
6928 	return false;
6929 }
6930 
/* Return true if any UUID advertised in the EIR/AD data matches an
 * entry in the @uuids filter list. 16- and 32-bit service UUIDs are
 * expanded to full 128-bit form using the Bluetooth base UUID before
 * comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	/* EIR/AD layout: each field is one length byte (covering the
	 * type byte plus data), one type byte, then the data itself.
	 */
	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the data. */
		if (field_len == 0)
			break;

		/* Stop on a truncated final field. */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Data starts at eir[2]; each UUID is 2 bytes,
			 * little-endian, mapped onto bytes 12-13 of the
			 * base UUID.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 4-byte UUIDs map onto bytes 12-15. */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 128-bit UUIDs are compared as-is. */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance to the next field. */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
6985 
6986 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6987 		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
6988 		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
6989 {
6990 	char buf[512];
6991 	struct mgmt_ev_device_found *ev = (void *) buf;
6992 	size_t ev_size;
6993 	bool match;
6994 
6995 	/* Don't send events for a non-kernel initiated discovery. With
6996 	 * LE one exception is if we have pend_le_reports > 0 in which
6997 	 * case we're doing passive scanning and want these events.
6998 	 */
6999 	if (!hci_discovery_active(hdev)) {
7000 		if (link_type == ACL_LINK)
7001 			return;
7002 		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
7003 			return;
7004 	}
7005 
7006 	/* When using service discovery with a RSSI threshold, then check
7007 	 * if such a RSSI threshold is specified. If a RSSI threshold has
7008 	 * been specified, then all results with a RSSI smaller than the
7009 	 * RSSI threshold will be dropped.
7010 	 *
7011 	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
7012 	 * the results are also dropped.
7013 	 */
7014 	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7015 	    (rssi < hdev->discovery.rssi || rssi == HCI_RSSI_INVALID))
7016 		return;
7017 
7018 	/* Make sure that the buffer is big enough. The 5 extra bytes
7019 	 * are for the potential CoD field.
7020 	 */
7021 	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
7022 		return;
7023 
7024 	memset(buf, 0, sizeof(buf));
7025 
7026 	/* In case of device discovery with BR/EDR devices (pre 1.2), the
7027 	 * RSSI value was reported as 0 when not available. This behavior
7028 	 * is kept when using device discovery. This is required for full
7029 	 * backwards compatibility with the API.
7030 	 *
7031 	 * However when using service discovery, the value 127 will be
7032 	 * returned when the RSSI is not available.
7033 	 */
7034 	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi)
7035 		rssi = 0;
7036 
7037 	bacpy(&ev->addr.bdaddr, bdaddr);
7038 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
7039 	ev->rssi = rssi;
7040 	ev->flags = cpu_to_le32(flags);
7041 
7042 	if (eir_len > 0) {
7043 		/* When using service discovery and a list of UUID is
7044 		 * provided, results with no matching UUID should be
7045 		 * dropped. In case there is a match the result is
7046 		 * kept and checking possible scan response data
7047 		 * will be skipped.
7048 		 */
7049 		if (hdev->discovery.uuid_count > 0) {
7050 			match = eir_has_uuids(eir, eir_len,
7051 					      hdev->discovery.uuid_count,
7052 					      hdev->discovery.uuids);
7053 			if (!match)
7054 				return;
7055 		}
7056 
7057 		/* Copy EIR or advertising data into event */
7058 		memcpy(ev->eir, eir, eir_len);
7059 	} else {
7060 		/* When using service discovery and a list of UUID is
7061 		 * provided, results with empty EIR or advertising data
7062 		 * should be dropped since they do not match any UUID.
7063 		 */
7064 		if (hdev->discovery.uuid_count > 0)
7065 			return;
7066 	}
7067 
7068 	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
7069 		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
7070 					  dev_class, 3);
7071 
7072 	if (scan_rsp_len > 0) {
7073 		/* When using service discovery and a list of UUID is
7074 		 * provided, results with no matching UUID should be
7075 		 * dropped if there is no previous match from the
7076 		 * advertising data.
7077 		 */
7078 		if (hdev->discovery.uuid_count > 0) {
7079 			if (!match && !eir_has_uuids(scan_rsp, scan_rsp_len,
7080 						     hdev->discovery.uuid_count,
7081 						     hdev->discovery.uuids))
7082 				return;
7083 		}
7084 
7085 		/* Append scan response data to event */
7086 		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
7087 	} else {
7088 		/* When using service discovery and a list of UUID is
7089 		 * provided, results with empty scan response and no
7090 		 * previous matched advertising data should be dropped.
7091 		 */
7092 		if (hdev->discovery.uuid_count > 0 && !match)
7093 			return;
7094 	}
7095 
7096 	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
7097 	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
7098 
7099 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
7100 }
7101 
7102 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7103 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7104 {
7105 	struct mgmt_ev_device_found *ev;
7106 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7107 	u16 eir_len;
7108 
7109 	ev = (struct mgmt_ev_device_found *) buf;
7110 
7111 	memset(buf, 0, sizeof(buf));
7112 
7113 	bacpy(&ev->addr.bdaddr, bdaddr);
7114 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
7115 	ev->rssi = rssi;
7116 
7117 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7118 				  name_len);
7119 
7120 	ev->eir_len = cpu_to_le16(eir_len);
7121 
7122 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
7123 }
7124 
7125 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7126 {
7127 	struct mgmt_ev_discovering ev;
7128 
7129 	BT_DBG("%s discovering %u", hdev->name, discovering);
7130 
7131 	memset(&ev, 0, sizeof(ev));
7132 	ev.type = hdev->discovery.type;
7133 	ev.discovering = discovering;
7134 
7135 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7136 }
7137 
/* Request callback for mgmt_reenable_advertising(); only logs status. */
static void adv_enable_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status %u", hdev->name, status);
}
7142 
7143 void mgmt_reenable_advertising(struct hci_dev *hdev)
7144 {
7145 	struct hci_request req;
7146 
7147 	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
7148 		return;
7149 
7150 	hci_req_init(&req, hdev);
7151 	enable_advertising(&req);
7152 	hci_req_run(&req, adv_enable_complete);
7153 }
7154