xref: /linux/drivers/net/wireless/intel/iwlwifi/mei/main.c (revision 9410645520e9b820069761f3450ef6661418e279)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2021-2024 Intel Corporation
4  */
5 
6 #include <linux/etherdevice.h>
7 #include <linux/netdevice.h>
8 #include <linux/ieee80211.h>
9 #include <linux/rtnetlink.h>
10 #include <linux/module.h>
11 #include <linux/moduleparam.h>
12 #include <linux/mei_cl_bus.h>
13 #include <linux/rcupdate.h>
14 #include <linux/debugfs.h>
15 #include <linux/skbuff.h>
16 #include <linux/wait.h>
17 #include <linux/slab.h>
18 #include <linux/mm.h>
19 
20 #include <net/cfg80211.h>
21 
22 #include "internal.h"
23 #include "iwl-mei.h"
24 #include "trace.h"
25 #include "trace-data.h"
26 #include "sap.h"
27 
28 MODULE_DESCRIPTION("The Intel(R) wireless / CSME firmware interface");
29 MODULE_LICENSE("GPL");
30 
31 #define MEI_WLAN_UUID UUID_LE(0x13280904, 0x7792, 0x4fcb, \
32 			      0xa1, 0xaa, 0x5e, 0x70, 0xcb, 0xb1, 0xe8, 0x65)
33 
34 /* After CSME takes ownership, it won't release it for 60 seconds to avoid
35  * frequent ownership transitions.
36  */
37 #define MEI_OWNERSHIP_RETAKE_TIMEOUT_MS	msecs_to_jiffies(60000)
38 
39 /*
40  * Since iwlwifi calls iwlmei without any context, hold a pointer to the
41  * mei_cl_device structure here.
42  * Define a mutex that will synchronize all the flows between iwlwifi and
43  * iwlmei.
44  * Note that iwlmei can't have several instances, so it is OK to have static
45  * variables here.
46  */
47 static struct mei_cl_device *iwl_mei_global_cldev;
48 static DEFINE_MUTEX(iwl_mei_mutex);
49 static unsigned long iwl_mei_status;
50 
51 enum iwl_mei_status_bits {
52 	IWL_MEI_STATUS_SAP_CONNECTED,
53 };
54 
55 bool iwl_mei_is_connected(void)
56 {
57 	return test_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
58 }
59 EXPORT_SYMBOL_GPL(iwl_mei_is_connected);
60 
61 #define SAP_CONTROL_BLOCK_ID 0x21504153 /* SAP! in ASCII */
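/*
 * For reference: stored little-endian, 0x21504153 becomes the bytes
 * 0x53 0x41 0x50 0x21, i.e. 'S' 'A' 'P' '!' when read as characters.
 */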
62 
63 struct iwl_sap_q_ctrl_blk {
64 	__le32 wr_ptr;
65 	__le32 rd_ptr;
66 	__le32 size;
67 };
68 
69 enum iwl_sap_q_idx {
70 	SAP_QUEUE_IDX_NOTIF = 0,
71 	SAP_QUEUE_IDX_DATA,
72 	SAP_QUEUE_IDX_MAX,
73 };
74 
75 struct iwl_sap_dir {
76 	__le32 reserved;
77 	struct iwl_sap_q_ctrl_blk q_ctrl_blk[SAP_QUEUE_IDX_MAX];
78 };
79 
80 enum iwl_sap_dir_idx {
81 	SAP_DIRECTION_HOST_TO_ME = 0,
82 	SAP_DIRECTION_ME_TO_HOST,
83 	SAP_DIRECTION_MAX,
84 };
85 
86 struct iwl_sap_shared_mem_ctrl_blk {
87 	__le32 sap_id;
88 	__le32 size;
89 	struct iwl_sap_dir dir[SAP_DIRECTION_MAX];
90 };
91 
92 /*
93  * The shared area has the following layout:
94  *
95  * +-----------------------------------+
96  * |struct iwl_sap_shared_mem_ctrl_blk |
97  * +-----------------------------------+
98  * |Host -> ME notif queue             |
99  * +-----------------------------------+
100  * |Host -> ME data queue              |
101  * +-----------------------------------+
102  * |ME -> Host notif queue             |
103  * +-----------------------------------+
104  * |ME -> Host data queue              |
105  * +-----------------------------------+
106  * |SAP control block id (SAP!)        |
107  * +-----------------------------------+
108  */
109 
110 #define SAP_H2M_DATA_Q_SZ	48256
111 #define SAP_M2H_DATA_Q_SZ	24128
112 #define SAP_H2M_NOTIF_Q_SZ_VER3	2240
113 #define SAP_H2M_NOTIF_Q_SZ_VER4	32768
114 #define SAP_M2H_NOTIF_Q_SZ	62720
115 
116 #define _IWL_MEI_SAP_SHARED_MEM_SZ_VER3 \
117 	(sizeof(struct iwl_sap_shared_mem_ctrl_blk) + \
118 	 SAP_H2M_DATA_Q_SZ + SAP_H2M_NOTIF_Q_SZ_VER3 + \
119 	 SAP_M2H_DATA_Q_SZ + SAP_M2H_NOTIF_Q_SZ + 4)
120 
121 #define _IWL_MEI_SAP_SHARED_MEM_SZ_VER4 \
122 	(sizeof(struct iwl_sap_shared_mem_ctrl_blk) + \
123 	 SAP_H2M_DATA_Q_SZ + SAP_H2M_NOTIF_Q_SZ_VER4 + \
124 	 SAP_M2H_DATA_Q_SZ + SAP_M2H_NOTIF_Q_SZ + 4)
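/*
 * The trailing "+ 4" in both sizes accounts for the copy of the SAP
 * control block ID ("SAP!") that iwl_mei_init_shared_mem() writes right
 * after the last queue. The result is then rounded up to a whole number
 * of pages before the DMA mapping is requested.
 */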
125 
126 struct iwl_mei_shared_mem_ptrs {
127 	struct iwl_sap_shared_mem_ctrl_blk *ctrl;
128 	void *q_head[SAP_DIRECTION_MAX][SAP_QUEUE_IDX_MAX];
129 	size_t q_size[SAP_DIRECTION_MAX][SAP_QUEUE_IDX_MAX];
130 };
131 
132 struct iwl_mei_filters {
133 	struct rcu_head rcu_head;
134 	struct iwl_sap_oob_filters filters;
135 };
136 
137 /**
138  * struct iwl_mei - holds the private data for iwl_mei
139  *
140  * @get_nvm_wq: the wait queue for the get_nvm flow
141  * @send_csa_msg_wk: used to defer the transmission of the CHECK_SHARED_AREA
142  *	message. Used so that we can send CHECK_SHARED_AREA from atomic
143  *	contexts.
144  * @get_ownership_wq: the wait queue for the get_ownership flow
145  * @shared_mem: the memory that is shared between CSME and the host
146  * @cldev: the pointer to the MEI client device
147  * @nvm: the data returned by the CSME for the NVM
148  * @filters: the filters sent by CSME
149  * @got_ownership: true if we own the device
150  * @amt_enabled: true if CSME has wireless enabled
151  * @csa_throttled: when true, we can't send CHECK_SHARED_AREA over the MEI
152  *	bus, but rather need to wait until csa_throttle_end_wk runs
153  * @csme_taking_ownership: true when CSME is taking ownership. Used to remember
154  *	to send CSME_OWNERSHIP_CONFIRMED when the driver completes its down
155  *	flow.
156  * @link_prot_state: true when we are in link protection PASSIVE
157  * @device_down: true if the device is down. Used to remember to send
158  *	CSME_OWNERSHIP_CONFIRMED when the driver is already down.
159  * @csa_throttle_end_wk: the delayed work that ends the CSA throttling window
160  * @pldr_wq: the wait queue for PLDR flow
161  * @pldr_active: PLDR flow is in progress
162  * @data_q_lock: protects the access to the data queues which are
163  *	accessed without the mutex.
164  * @netdev_work: used to defer registering and unregistering of the netdev to
165  *	avoid taking the rtnl lock in the SAP messages handlers.
166  * @ownership_dwork: used to re-ask for NIC ownership after ownership was taken
167  *	by CSME or when a previous ownership request failed.
168  * @sap_seq_no: the sequence number for the SAP messages in the shared area
169  * @seq_no: the sequence number for the SAP ME messages on the MEI bus
170  * @dbgfs_dir: the debugfs dir entry
171  */
172 struct iwl_mei {
173 	wait_queue_head_t get_nvm_wq;
174 	struct work_struct send_csa_msg_wk;
175 	wait_queue_head_t get_ownership_wq;
176 	struct iwl_mei_shared_mem_ptrs shared_mem;
177 	struct mei_cl_device *cldev;
178 	struct iwl_mei_nvm *nvm;
179 	struct iwl_mei_filters __rcu *filters;
180 	bool got_ownership;
181 	bool amt_enabled;
182 	bool csa_throttled;
183 	bool csme_taking_ownership;
184 	bool link_prot_state;
185 	bool device_down;
186 	struct delayed_work csa_throttle_end_wk;
187 	wait_queue_head_t pldr_wq;
188 	bool pldr_active;
189 	spinlock_t data_q_lock;
190 	struct work_struct netdev_work;
191 	struct delayed_work ownership_dwork;
192 
193 	atomic_t sap_seq_no;
194 	atomic_t seq_no;
195 
196 	struct dentry *dbgfs_dir;
197 };
198 
199 /**
200  * struct iwl_mei_cache - cache for the parameters from iwlwifi
201  * @ops: Callbacks to iwlwifi.
202  * @netdev: The netdev that will be used to transmit / receive packets.
203  * @conn_info: The connection info message triggered by iwlwifi's association.
204  * @power_limit: pointer to an array of 10 elements (le16) representing the
205  *	restrictions per chain.
206  * @rf_kill: rf kill state.
207  * @mcc: MCC info
208  * @mac_address: interface MAC address.
209  * @nvm_address: NVM MAC address.
210  * @priv: A pointer to iwlwifi.
211  * @sap_version: The SAP version to use. enum iwl_mei_sap_version.
212  *
213  * This is used to cache the configuration coming from iwlwifi. The data
214  * is cached here so that we can buffer the configuration even if we don't
215  * have a binding from the mei bus and hence no iwl_mei structure.
216  */
217 struct iwl_mei_cache {
218 	const struct iwl_mei_ops *ops;
219 	struct net_device __rcu *netdev;
220 	const struct iwl_sap_notif_connection_info *conn_info;
221 	const __le16 *power_limit;
222 	u32 rf_kill;
223 	u16 mcc;
224 	u8 mac_address[6];
225 	u8 nvm_address[6];
226 	enum iwl_mei_sap_version sap_version;
227 	void *priv;
228 };
229 
230 static struct iwl_mei_cache iwl_mei_cache = {
231 	.rf_kill = SAP_HW_RFKILL_DEASSERTED | SAP_SW_RFKILL_DEASSERTED
232 };
233 
234 static void iwl_mei_free_shared_mem(struct mei_cl_device *cldev)
235 {
236 	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
237 
238 	if (mei_cldev_dma_unmap(cldev))
239 		dev_err(&cldev->dev, "Couldn't unmap the shared mem properly\n");
240 	memset(&mei->shared_mem, 0, sizeof(mei->shared_mem));
241 }
242 
243 #define HBM_DMA_BUF_ID_WLAN 1
244 
245 static int iwl_mei_alloc_mem_for_version(struct mei_cl_device *cldev,
246 					 enum iwl_mei_sap_version version)
247 {
248 	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
249 	struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem;
250 	u32 mem_size = roundup(version == IWL_MEI_SAP_VERSION_4 ?
251 			       _IWL_MEI_SAP_SHARED_MEM_SZ_VER4 :
252 			       _IWL_MEI_SAP_SHARED_MEM_SZ_VER3, PAGE_SIZE);
253 
254 	iwl_mei_cache.sap_version = version;
255 	mem->ctrl = mei_cldev_dma_map(cldev, HBM_DMA_BUF_ID_WLAN, mem_size);
256 	if (IS_ERR(mem->ctrl)) {
257 		int ret = PTR_ERR(mem->ctrl);
258 
259 		mem->ctrl = NULL;
260 
261 		return ret;
262 	}
263 
264 	memset(mem->ctrl, 0, mem_size);
265 
266 	return 0;
267 }
268 
269 static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev)
270 {
271 	int ret;
272 
273 	/*
274 	 * SAP version 4 uses a larger Host to MEI notif queue.
275 	 * Since it is unknown at this stage which SAP version is used by the
276 	 * CSME firmware on this platform, try to allocate the version 4 first.
277 	 * If the CSME firmware uses version 3, this allocation is expected to
278 	 * fail because the CSME firmware allocated less memory for our driver.
279 	 */
280 	ret = iwl_mei_alloc_mem_for_version(cldev, IWL_MEI_SAP_VERSION_4);
281 	if (ret)
282 		ret = iwl_mei_alloc_mem_for_version(cldev,
283 						    IWL_MEI_SAP_VERSION_3);
284 
285 	return ret;
286 }
287 
288 static void iwl_mei_init_shared_mem(struct iwl_mei *mei)
289 {
290 	struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem;
291 	struct iwl_sap_dir *h2m;
292 	struct iwl_sap_dir *m2h;
293 	int dir, queue;
294 	u8 *q_head;
295 
296 	mem->ctrl->sap_id = cpu_to_le32(SAP_CONTROL_BLOCK_ID);
297 
298 	mem->ctrl->size = cpu_to_le32(sizeof(*mem->ctrl));
299 
300 	h2m = &mem->ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
301 	m2h = &mem->ctrl->dir[SAP_DIRECTION_ME_TO_HOST];
302 
303 	h2m->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size =
304 		cpu_to_le32(SAP_H2M_DATA_Q_SZ);
305 	h2m->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size =
306 		iwl_mei_cache.sap_version == IWL_MEI_SAP_VERSION_3 ?
307 		cpu_to_le32(SAP_H2M_NOTIF_Q_SZ_VER3) :
308 		cpu_to_le32(SAP_H2M_NOTIF_Q_SZ_VER4);
309 	m2h->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size =
310 		cpu_to_le32(SAP_M2H_DATA_Q_SZ);
311 	m2h->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size =
312 		cpu_to_le32(SAP_M2H_NOTIF_Q_SZ);
313 
314 	/* q_head points to the start of the first queue */
315 	q_head = (void *)(mem->ctrl + 1);
316 
317 	/* Initialize the queue heads */
318 	for (dir = 0; dir < SAP_DIRECTION_MAX; dir++) {
319 		for (queue = 0; queue < SAP_QUEUE_IDX_MAX; queue++) {
320 			mem->q_head[dir][queue] = q_head;
321 			q_head +=
322 				le32_to_cpu(mem->ctrl->dir[dir].q_ctrl_blk[queue].size);
323 			mem->q_size[dir][queue] =
324 				le32_to_cpu(mem->ctrl->dir[dir].q_ctrl_blk[queue].size);
325 		}
326 	}
327 
328 	*(__le32 *)q_head = cpu_to_le32(SAP_CONTROL_BLOCK_ID);
329 }
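/*
 * Worked example (SAP version 3, relying on the fact that the __le32-only
 * structures above have no padding): sizeof(struct
 * iwl_sap_shared_mem_ctrl_blk) is 64 bytes, so the areas start at the
 * following offsets from mem->ctrl:
 *	H2M notif queue:      64 (2240 bytes)
 *	H2M data queue:     2304 (48256 bytes)
 *	M2H notif queue:   50560 (62720 bytes)
 *	M2H data queue:   113280 (24128 bytes)
 *	SAP! marker:      137408 (4 bytes)
 */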
330 
331 static ssize_t iwl_mei_write_cyclic_buf(struct mei_cl_device *cldev,
332 					struct iwl_sap_q_ctrl_blk *notif_q,
333 					u8 *q_head,
334 					const struct iwl_sap_hdr *hdr,
335 					u32 q_sz)
336 {
337 	u32 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
338 	u32 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
339 	size_t room_in_buf;
340 	size_t tx_sz = sizeof(*hdr) + le16_to_cpu(hdr->len);
341 
342 	if (rd > q_sz || wr > q_sz) {
343 		dev_err(&cldev->dev,
344 			"Pointers are past the end of the buffer\n");
345 		return -EINVAL;
346 	}
347 
348 	room_in_buf = wr >= rd ? q_sz - wr + rd : rd - wr;
349 
350 	/* we don't have enough room for the data to write */
351 	if (room_in_buf < tx_sz) {
352 		dev_err(&cldev->dev,
353 			"Not enough room in the buffer\n");
354 		return -ENOSPC;
355 	}
356 
357 	if (wr + tx_sz <= q_sz) {
358 		memcpy(q_head + wr, hdr, tx_sz);
359 	} else {
360 		memcpy(q_head + wr, hdr, q_sz - wr);
361 		memcpy(q_head, (const u8 *)hdr + q_sz - wr, tx_sz - (q_sz - wr));
362 	}
363 
364 	WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz));
365 	return 0;
366 }
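/*
 * Wrap-around example: with q_sz = 16, rd = 4 and wr = 12, room_in_buf is
 * 16 - 12 + 4 = 8. Writing tx_sz = 6 copies 4 bytes at the tail (offsets
 * 12..15) and 2 bytes at the head (offsets 0..1), and then advances wr to
 * (12 + 6) % 16 = 2.
 */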
367 
368 static bool iwl_mei_host_to_me_data_pending(const struct iwl_mei *mei)
369 {
370 	struct iwl_sap_q_ctrl_blk *notif_q;
371 	struct iwl_sap_dir *dir;
372 
373 	dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
374 	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];
375 
376 	if (READ_ONCE(notif_q->wr_ptr) != READ_ONCE(notif_q->rd_ptr))
377 		return true;
378 
379 	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
380 	return READ_ONCE(notif_q->wr_ptr) != READ_ONCE(notif_q->rd_ptr);
381 }
382 
383 static int iwl_mei_send_check_shared_area(struct mei_cl_device *cldev)
384 {
385 	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
386 	struct iwl_sap_me_msg_start msg = {
387 		.hdr.type = cpu_to_le32(SAP_ME_MSG_CHECK_SHARED_AREA),
388 		.hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)),
389 	};
390 	int ret;
391 
392 	lockdep_assert_held(&iwl_mei_mutex);
393 
394 	if (mei->csa_throttled)
395 		return 0;
396 
397 	trace_iwlmei_me_msg(&msg.hdr, true);
398 	ret = mei_cldev_send(cldev, (void *)&msg, sizeof(msg));
399 	if (ret != sizeof(msg)) {
400 		dev_err(&cldev->dev,
401 			"failed to send the SAP_ME_MSG_CHECK_SHARED_AREA message %d\n",
402 			ret);
403 		return ret;
404 	}
405 
406 	mei->csa_throttled = true;
407 
408 	schedule_delayed_work(&mei->csa_throttle_end_wk,
409 			      msecs_to_jiffies(100));
410 
411 	return 0;
412 }
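/*
 * Note the throttling scheme: once a CHECK_SHARED_AREA kick is sent, any
 * further kick is swallowed for 100ms; csa_throttle_end_wk then clears
 * the throttling and sends a single catch-up kick if data is still
 * pending in the host to ME queues.
 */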
413 
414 static void iwl_mei_csa_throttle_end_wk(struct work_struct *wk)
415 {
416 	struct iwl_mei *mei =
417 		container_of(wk, struct iwl_mei, csa_throttle_end_wk.work);
418 
419 	mutex_lock(&iwl_mei_mutex);
420 
421 	mei->csa_throttled = false;
422 
423 	if (iwl_mei_host_to_me_data_pending(mei))
424 		iwl_mei_send_check_shared_area(mei->cldev);
425 
426 	mutex_unlock(&iwl_mei_mutex);
427 }
428 
429 static int iwl_mei_send_sap_msg_payload(struct mei_cl_device *cldev,
430 					struct iwl_sap_hdr *hdr)
431 {
432 	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
433 	struct iwl_sap_q_ctrl_blk *notif_q;
434 	struct iwl_sap_dir *dir;
435 	void *q_head;
436 	u32 q_sz;
437 	int ret;
438 
439 	lockdep_assert_held(&iwl_mei_mutex);
440 
441 	if (!mei->shared_mem.ctrl) {
442 		dev_err(&cldev->dev,
443 			"No shared memory, can't send any SAP message\n");
444 		return -EINVAL;
445 	}
446 
447 	if (!iwl_mei_is_connected()) {
448 		dev_err(&cldev->dev,
449 			"Can't send a SAP message if we're not connected\n");
450 		return -ENODEV;
451 	}
452 
453 	hdr->seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
454 	dev_dbg(&cldev->dev, "Sending %d\n", hdr->type);
455 
456 	dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
457 	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
458 	q_head = mei->shared_mem.q_head[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_NOTIF];
459 	q_sz = mei->shared_mem.q_size[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_NOTIF];
460 	ret = iwl_mei_write_cyclic_buf(cldev, notif_q, q_head, hdr, q_sz);
461 
462 	if (ret < 0)
463 		return ret;
464 
465 	trace_iwlmei_sap_cmd(hdr, true);
466 
467 	return iwl_mei_send_check_shared_area(cldev);
468 }
469 
470 void iwl_mei_add_data_to_ring(struct sk_buff *skb, bool cb_tx)
471 {
472 	struct iwl_sap_q_ctrl_blk *notif_q;
473 	struct iwl_sap_dir *dir;
474 	struct iwl_mei *mei;
475 	size_t room_in_buf;
476 	size_t tx_sz;
477 	size_t hdr_sz;
478 	u32 q_sz;
479 	u32 rd;
480 	u32 wr;
481 	u8 *q_head;
482 
483 	if (!iwl_mei_global_cldev)
484 		return;
485 
486 	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
487 
488 	/*
489 	 * We access this path for Rx packets (the more common case)
490 	 * and from the Tx path when we send DHCP packets; the latter is
491 	 * very unlikely.
492 	 * Take the lock here already so that we observe it if remove()
493 	 * has cleared the IWL_MEI_STATUS_SAP_CONNECTED bit.
494 	 */
495 	spin_lock_bh(&mei->data_q_lock);
496 
497 	if (!iwl_mei_is_connected()) {
498 		spin_unlock_bh(&mei->data_q_lock);
499 		return;
500 	}
501 
502 	/*
503 	 * We are in an RCU critical section and the remove from the CSME bus
504 	 * which would free this memory waits for the readers to complete (this
505 	 * is done in netdev_rx_handler_unregister).
506 	 */
507 	dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
508 	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];
509 	q_head = mei->shared_mem.q_head[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_DATA];
510 	q_sz = mei->shared_mem.q_size[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_DATA];
511 
512 	rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
513 	wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
514 	hdr_sz = cb_tx ? sizeof(struct iwl_sap_cb_data) :
515 			 sizeof(struct iwl_sap_hdr);
516 	tx_sz = skb->len + hdr_sz;
517 
518 	if (rd > q_sz || wr > q_sz) {
519 		dev_err(&mei->cldev->dev,
520 			"can't write the data: pointers are past the end of the buffer\n");
521 		goto out;
522 	}
523 
524 	room_in_buf = wr >= rd ? q_sz - wr + rd : rd - wr;
525 
526 	/* we don't have enough room for the data to write */
527 	if (room_in_buf < tx_sz) {
528 		dev_err(&mei->cldev->dev,
529 			"Not enough room in the buffer for this data\n");
530 		goto out;
531 	}
532 
533 	if (skb_headroom(skb) < hdr_sz) {
534 		dev_err(&mei->cldev->dev,
535 			"Not enough headroom in the skb to write the SAP header\n");
536 		goto out;
537 	}
538 
539 	if (cb_tx) {
540 		struct iwl_sap_cb_data *cb_hdr = skb_push(skb, sizeof(*cb_hdr));
541 
542 		memset(cb_hdr, 0, sizeof(*cb_hdr));
543 		cb_hdr->hdr.type = cpu_to_le16(SAP_MSG_CB_DATA_PACKET);
544 		cb_hdr->hdr.len = cpu_to_le16(skb->len - sizeof(cb_hdr->hdr));
545 		cb_hdr->hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
546 		cb_hdr->to_me_filt_status = cpu_to_le32(BIT(CB_TX_DHCP_FILT_IDX));
547 		cb_hdr->data_len = cpu_to_le32(skb->len - sizeof(*cb_hdr));
548 		trace_iwlmei_sap_data(skb, IWL_SAP_TX_DHCP);
549 	} else {
550 		struct iwl_sap_hdr *hdr = skb_push(skb, sizeof(*hdr));
551 
552 		hdr->type = cpu_to_le16(SAP_MSG_DATA_PACKET);
553 		hdr->len = cpu_to_le16(skb->len - sizeof(*hdr));
554 		hdr->seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
555 		trace_iwlmei_sap_data(skb, IWL_SAP_TX_DATA_FROM_AIR);
556 	}
557 
558 	if (wr + tx_sz <= q_sz) {
559 		skb_copy_bits(skb, 0, q_head + wr, tx_sz);
560 	} else {
561 		skb_copy_bits(skb, 0, q_head + wr, q_sz - wr);
562 		skb_copy_bits(skb, q_sz - wr, q_head, tx_sz - (q_sz - wr));
563 	}
564 
565 	WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz));
566 
567 out:
568 	spin_unlock_bh(&mei->data_q_lock);
569 }
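/*
 * The records written above are self-describing. Roughly, a packet from
 * the air is laid out in the data queue as
 *	[struct iwl_sap_hdr][ethernet frame]
 * and a DHCP packet caught on Tx as
 *	[struct iwl_sap_cb_data][ethernet frame]
 * with hdr.len covering everything past the basic iwl_sap_hdr.
 */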
570 
571 static int
572 iwl_mei_send_sap_msg(struct mei_cl_device *cldev, u16 type)
573 {
574 	struct iwl_sap_hdr msg = {
575 		.type = cpu_to_le16(type),
576 	};
577 
578 	return iwl_mei_send_sap_msg_payload(cldev, &msg);
579 }
580 
581 static void iwl_mei_send_csa_msg_wk(struct work_struct *wk)
582 {
583 	struct iwl_mei *mei =
584 		container_of(wk, struct iwl_mei, send_csa_msg_wk);
585 
586 	if (!iwl_mei_is_connected())
587 		return;
588 
589 	mutex_lock(&iwl_mei_mutex);
590 
591 	iwl_mei_send_check_shared_area(mei->cldev);
592 
593 	mutex_unlock(&iwl_mei_mutex);
594 }
595 
596 /* Called in a RCU read critical section from netif_receive_skb */
597 static rx_handler_result_t iwl_mei_rx_handler(struct sk_buff **pskb)
598 {
599 	struct sk_buff *skb = *pskb;
600 	struct iwl_mei *mei =
601 		rcu_dereference(skb->dev->rx_handler_data);
602 	struct iwl_mei_filters *filters = rcu_dereference(mei->filters);
603 	bool rx_for_csme = false;
604 	rx_handler_result_t res;
605 
606 	/*
607 	 * remove() unregisters this handler and calls synchronize_net(), so this
608 	 * should never happen.
609 	 */
610 	if (!iwl_mei_is_connected()) {
611 		dev_err(&mei->cldev->dev,
612 			"Got an Rx packet, but we're not connected to SAP?\n");
613 		return RX_HANDLER_PASS;
614 	}
615 
616 	if (filters)
617 		res = iwl_mei_rx_filter(skb, &filters->filters, &rx_for_csme);
618 	else
619 		res = RX_HANDLER_PASS;
620 
621 	/*
622 	 * The data is already on the ring of the shared area, all we
623 	 * need to do is to tell the CSME firmware to check what we have
624 	 * there.
625 	 */
626 	if (rx_for_csme)
627 		schedule_work(&mei->send_csa_msg_wk);
628 
629 	if (res != RX_HANDLER_PASS) {
630 		trace_iwlmei_sap_data(skb, IWL_SAP_RX_DATA_DROPPED_FROM_AIR);
631 		dev_kfree_skb(skb);
632 	}
633 
634 	return res;
635 }
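/*
 * Reminder on the rx_handler contract: RX_HANDLER_PASS lets the stack
 * keep processing the skb; any other result means the filter decided the
 * packet belongs to CSME, so it is dropped here (the payload already sits
 * in the shared area for CSME to read).
 */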
636 
637 static void iwl_mei_netdev_work(struct work_struct *wk)
638 {
639 	struct iwl_mei *mei =
640 		container_of(wk, struct iwl_mei, netdev_work);
641 	struct net_device *netdev;
642 
643 	/*
644 	 * First take rtnl and only then the mutex to avoid an ABBA deadlock
645 	 * with iwl_mei_set_netdev()
646 	 */
647 	rtnl_lock();
648 	mutex_lock(&iwl_mei_mutex);
649 
650 	netdev = rcu_dereference_protected(iwl_mei_cache.netdev,
651 					   lockdep_is_held(&iwl_mei_mutex));
652 	if (netdev) {
653 		if (mei->amt_enabled)
654 			netdev_rx_handler_register(netdev, iwl_mei_rx_handler,
655 						   mei);
656 		else
657 			netdev_rx_handler_unregister(netdev);
658 	}
659 
660 	mutex_unlock(&iwl_mei_mutex);
661 	rtnl_unlock();
662 }
663 
664 static void
665 iwl_mei_handle_rx_start_ok(struct mei_cl_device *cldev,
666 			   const struct iwl_sap_me_msg_start_ok *rsp,
667 			   ssize_t len)
668 {
669 	if (len != sizeof(*rsp)) {
670 		dev_err(&cldev->dev,
671 			"got invalid SAP_ME_MSG_START_OK from CSME firmware\n");
672 		dev_err(&cldev->dev,
673 			"size is incorrect: %zd instead of %zu\n",
674 			len, sizeof(*rsp));
675 		return;
676 	}
677 
678 	if (rsp->supported_version != iwl_mei_cache.sap_version) {
679 		dev_err(&cldev->dev,
680 			"didn't get the expected version: got %d\n",
681 			rsp->supported_version);
682 		return;
683 	}
684 
685 	mutex_lock(&iwl_mei_mutex);
686 	set_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
687 	/*
688 	 * We'll receive the AMT_STATE SAP message in a bit and
689 	 * that will continue the flow
690 	 */
691 	mutex_unlock(&iwl_mei_mutex);
692 }
693 
694 static void iwl_mei_handle_csme_filters(struct mei_cl_device *cldev,
695 					const struct iwl_sap_csme_filters *filters)
696 {
697 	struct iwl_mei *mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
698 	struct iwl_mei_filters *new_filters;
699 	struct iwl_mei_filters *old_filters;
700 
701 	old_filters =
702 		rcu_dereference_protected(mei->filters,
703 					  lockdep_is_held(&iwl_mei_mutex));
704 
705 	new_filters = kzalloc(sizeof(*new_filters), GFP_KERNEL);
706 	if (!new_filters)
707 		return;
708 
709 	/* Copy the OOB filters */
710 	new_filters->filters = filters->filters;
711 
712 	rcu_assign_pointer(mei->filters, new_filters);
713 
714 	if (old_filters)
715 		kfree_rcu(old_filters, rcu_head);
716 }
717 
718 static void
719 iwl_mei_handle_conn_status(struct mei_cl_device *cldev,
720 			   const struct iwl_sap_notif_conn_status *status)
721 {
722 	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
723 	struct iwl_mei_conn_info conn_info = {
724 		.lp_state = le32_to_cpu(status->link_prot_state),
725 		.ssid_len = le32_to_cpu(status->conn_info.ssid_len),
726 		.channel = status->conn_info.channel,
727 		.band = status->conn_info.band,
728 		.auth_mode = le32_to_cpu(status->conn_info.auth_mode),
729 		.pairwise_cipher = le32_to_cpu(status->conn_info.pairwise_cipher),
730 	};
731 
732 	if (!iwl_mei_cache.ops ||
733 	    conn_info.ssid_len > ARRAY_SIZE(conn_info.ssid))
734 		return;
735 
736 	memcpy(conn_info.ssid, status->conn_info.ssid, conn_info.ssid_len);
737 	ether_addr_copy(conn_info.bssid, status->conn_info.bssid);
738 
739 	iwl_mei_cache.ops->me_conn_status(iwl_mei_cache.priv, &conn_info);
740 
741 	mei->link_prot_state = status->link_prot_state;
742 
743 	/*
744 	 * Update the Rfkill state in case the host does not own the device:
745 	 * if we are in Link Protection, ask the host not to touch the device;
746 	 * otherwise, unblock rfkill.
747 	 * If the host owns the device, inform the user space whether it can
748 	 * roam.
749 	 */
750 	if (mei->got_ownership)
751 		iwl_mei_cache.ops->roaming_forbidden(iwl_mei_cache.priv,
752 						     status->link_prot_state);
753 	else
754 		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv,
755 					  status->link_prot_state, false);
756 }
757 
758 static void iwl_mei_set_init_conf(struct iwl_mei *mei)
759 {
760 	struct iwl_sap_notif_host_link_up link_msg = {
761 		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_UP),
762 		.hdr.len = cpu_to_le16(sizeof(link_msg) - sizeof(link_msg.hdr)),
763 	};
764 	struct iwl_sap_notif_country_code mcc_msg = {
765 		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_COUNTRY_CODE),
766 		.hdr.len = cpu_to_le16(sizeof(mcc_msg) - sizeof(mcc_msg.hdr)),
767 		.mcc = cpu_to_le16(iwl_mei_cache.mcc),
768 	};
769 	struct iwl_sap_notif_sar_limits sar_msg = {
770 		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_SAR_LIMITS),
771 		.hdr.len = cpu_to_le16(sizeof(sar_msg) - sizeof(sar_msg.hdr)),
772 	};
773 	struct iwl_sap_notif_host_nic_info nic_info_msg = {
774 		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_NIC_INFO),
775 		.hdr.len = cpu_to_le16(sizeof(nic_info_msg) - sizeof(nic_info_msg.hdr)),
776 	};
777 	struct iwl_sap_msg_dw rfkill_msg = {
778 		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_RADIO_STATE),
779 		.hdr.len = cpu_to_le16(sizeof(rfkill_msg) - sizeof(rfkill_msg.hdr)),
780 		.val = cpu_to_le32(iwl_mei_cache.rf_kill),
781 	};
782 
783 	/* wifi driver has registered already */
784 	if (iwl_mei_cache.ops) {
785 		iwl_mei_send_sap_msg(mei->cldev,
786 				     SAP_MSG_NOTIF_WIFIDR_UP);
787 		iwl_mei_cache.ops->sap_connected(iwl_mei_cache.priv);
788 	}
789 
790 	iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WHO_OWNS_NIC);
791 
792 	if (iwl_mei_cache.conn_info) {
793 		link_msg.conn_info = *iwl_mei_cache.conn_info;
794 		iwl_mei_send_sap_msg_payload(mei->cldev, &link_msg.hdr);
795 	}
796 
797 	iwl_mei_send_sap_msg_payload(mei->cldev, &mcc_msg.hdr);
798 
799 	if (iwl_mei_cache.power_limit) {
800 		memcpy(sar_msg.sar_chain_info_table, iwl_mei_cache.power_limit,
801 		       sizeof(sar_msg.sar_chain_info_table));
802 		iwl_mei_send_sap_msg_payload(mei->cldev, &sar_msg.hdr);
803 	}
804 
805 	if (is_valid_ether_addr(iwl_mei_cache.mac_address)) {
806 		ether_addr_copy(nic_info_msg.mac_address,
807 				iwl_mei_cache.mac_address);
808 		ether_addr_copy(nic_info_msg.nvm_address,
809 				iwl_mei_cache.nvm_address);
810 		iwl_mei_send_sap_msg_payload(mei->cldev, &nic_info_msg.hdr);
811 	}
812 
813 	iwl_mei_send_sap_msg_payload(mei->cldev, &rfkill_msg.hdr);
814 }
815 
816 static void iwl_mei_handle_amt_state(struct mei_cl_device *cldev,
817 				     const struct iwl_sap_msg_dw *dw)
818 {
819 	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
820 
821 	mutex_lock(&iwl_mei_mutex);
822 
823 	if (mei->amt_enabled == !!le32_to_cpu(dw->val))
824 		goto out;
825 
826 	mei->amt_enabled = !!le32_to_cpu(dw->val);
827 
828 	if (mei->amt_enabled)
829 		iwl_mei_set_init_conf(mei);
830 	else if (iwl_mei_cache.ops)
831 		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false, false);
832 
833 	schedule_work(&mei->netdev_work);
834 
835 out:
836 	mutex_unlock(&iwl_mei_mutex);
837 }
838 
839 static void iwl_mei_handle_nic_owner(struct mei_cl_device *cldev,
840 				     const struct iwl_sap_msg_dw *dw)
841 {
842 	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
843 
844 	mei->got_ownership = dw->val != cpu_to_le32(SAP_NIC_OWNER_ME);
845 }
846 
847 static void iwl_mei_handle_can_release_ownership(struct mei_cl_device *cldev,
848 						 const void *payload)
849 {
850 	/* We can get ownership and the driver is registered, go ahead */
851 	if (iwl_mei_cache.ops)
852 		iwl_mei_send_sap_msg(cldev,
853 				     SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP);
854 }
855 
856 static void iwl_mei_handle_csme_taking_ownership(struct mei_cl_device *cldev,
857 						 const void *payload)
858 {
859 	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
860 
861 	dev_info(&cldev->dev, "CSME takes ownership\n");
862 
863 	mei->got_ownership = false;
864 
865 	if (iwl_mei_cache.ops && !mei->device_down) {
866 		/*
867 		 * Remember to send CSME_OWNERSHIP_CONFIRMED when the wifi
868 		 * driver is finished taking the device down.
869 		 */
870 		mei->csme_taking_ownership = true;
871 
872 		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, true, true);
873 	} else {
874 		iwl_mei_send_sap_msg(cldev,
875 				     SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED);
876 		schedule_delayed_work(&mei->ownership_dwork,
877 				      MEI_OWNERSHIP_RETAKE_TIMEOUT_MS);
878 	}
879 }
880 
881 static void iwl_mei_handle_nvm(struct mei_cl_device *cldev,
882 			       const struct iwl_sap_nvm *sap_nvm)
883 {
884 	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
885 	const struct iwl_mei_nvm *mei_nvm = (const void *)sap_nvm;
886 	int i;
887 
888 	kfree(mei->nvm);
889 	mei->nvm = kzalloc(sizeof(*mei_nvm), GFP_KERNEL);
890 	if (!mei->nvm)
891 		return;
892 
893 	ether_addr_copy(mei->nvm->hw_addr, sap_nvm->hw_addr);
894 	mei->nvm->n_hw_addrs = sap_nvm->n_hw_addrs;
895 	mei->nvm->radio_cfg = le32_to_cpu(sap_nvm->radio_cfg);
896 	mei->nvm->caps = le32_to_cpu(sap_nvm->caps);
897 	mei->nvm->nvm_version = le32_to_cpu(sap_nvm->nvm_version);
898 
899 	for (i = 0; i < ARRAY_SIZE(mei->nvm->channels); i++)
900 		mei->nvm->channels[i] = le32_to_cpu(sap_nvm->channels[i]);
901 
902 	wake_up_all(&mei->get_nvm_wq);
903 }
904 
905 static void iwl_mei_handle_rx_host_own_req(struct mei_cl_device *cldev,
906 					   const struct iwl_sap_msg_dw *dw)
907 {
908 	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
909 
910 	/*
911 	 * This means that we can't use the wifi device right now, CSME is not
912 	 * ready to let us use it.
913 	 */
914 	if (!dw->val) {
915 		dev_info(&cldev->dev, "Ownership req denied\n");
916 		return;
917 	}
918 
919 	mei->got_ownership = true;
920 	wake_up_all(&mei->get_ownership_wq);
921 
922 	iwl_mei_send_sap_msg(cldev,
923 			     SAP_MSG_NOTIF_HOST_OWNERSHIP_CONFIRMED);
924 
925 	/* We can now start the connection, unblock rfkill */
926 	if (iwl_mei_cache.ops)
927 		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false, false);
928 }
929 
930 static void iwl_mei_handle_pldr_ack(struct mei_cl_device *cldev,
931 				    const struct iwl_sap_pldr_ack_data *ack)
932 {
933 	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
934 
935 	mei->pldr_active = le32_to_cpu(ack->status) == SAP_PLDR_STATUS_SUCCESS;
936 	wake_up_all(&mei->pldr_wq);
937 }
938 
939 static void iwl_mei_handle_ping(struct mei_cl_device *cldev,
940 				const struct iwl_sap_hdr *hdr)
941 {
942 	iwl_mei_send_sap_msg(cldev, SAP_MSG_NOTIF_PONG);
943 }
944 
945 static void iwl_mei_handle_sap_msg(struct mei_cl_device *cldev,
946 				   const struct iwl_sap_hdr *hdr)
947 {
948 	u16 len = le16_to_cpu(hdr->len) + sizeof(*hdr);
949 	u16 type = le16_to_cpu(hdr->type);
950 
951 	dev_dbg(&cldev->dev,
952 		"Got a new SAP message: type %d, len %d, seq %d\n",
953 		le16_to_cpu(hdr->type), len,
954 		le32_to_cpu(hdr->seq_num));
955 
956 #define SAP_MSG_HANDLER(_cmd, _handler, _sz)				\
957 	case SAP_MSG_NOTIF_ ## _cmd:					\
958 		if (len < _sz) {					\
959 			dev_err(&cldev->dev,				\
960 				"Bad size for %d: %u < %u\n",		\
961 				le16_to_cpu(hdr->type),			\
962 				(unsigned int)len,			\
963 				(unsigned int)_sz);			\
964 			break;						\
965 		}							\
966 		mutex_lock(&iwl_mei_mutex);				\
967 		_handler(cldev, (const void *)hdr);			\
968 		mutex_unlock(&iwl_mei_mutex);				\
969 		break
970 
971 #define SAP_MSG_HANDLER_NO_LOCK(_cmd, _handler, _sz)			\
972 	case SAP_MSG_NOTIF_ ## _cmd:					\
973 		if (len < _sz) {					\
974 			dev_err(&cldev->dev,				\
975 				"Bad size for %d: %u < %u\n",		\
976 				le16_to_cpu(hdr->type),			\
977 				(unsigned int)len,			\
978 				(unsigned int)_sz);			\
979 			break;						\
980 		}							\
981 		_handler(cldev, (const void *)hdr);			\
982 		break
983 
984 #define SAP_MSG_HANDLER_NO_HANDLER(_cmd, _sz)				\
985 	case SAP_MSG_NOTIF_ ## _cmd:					\
986 		if (len < _sz) {					\
987 			dev_err(&cldev->dev,				\
988 				"Bad size for %d: %u < %u\n",		\
989 				le16_to_cpu(hdr->type),			\
990 				(unsigned int)len,			\
991 				(unsigned int)_sz);			\
992 			break;						\
993 		}							\
994 		break
995 
996 	switch (type) {
997 	SAP_MSG_HANDLER(PING, iwl_mei_handle_ping, 0);
998 	SAP_MSG_HANDLER(CSME_FILTERS,
999 			iwl_mei_handle_csme_filters,
1000 			sizeof(struct iwl_sap_csme_filters));
1001 	SAP_MSG_HANDLER(CSME_CONN_STATUS,
1002 			iwl_mei_handle_conn_status,
1003 			sizeof(struct iwl_sap_notif_conn_status));
1004 	SAP_MSG_HANDLER_NO_LOCK(AMT_STATE,
1005 				iwl_mei_handle_amt_state,
1006 				sizeof(struct iwl_sap_msg_dw));
1007 	SAP_MSG_HANDLER_NO_HANDLER(PONG, 0);
1008 	SAP_MSG_HANDLER(NVM, iwl_mei_handle_nvm,
1009 			sizeof(struct iwl_sap_nvm));
1010 	SAP_MSG_HANDLER(CSME_REPLY_TO_HOST_OWNERSHIP_REQ,
1011 			iwl_mei_handle_rx_host_own_req,
1012 			sizeof(struct iwl_sap_msg_dw));
1013 	SAP_MSG_HANDLER(NIC_OWNER, iwl_mei_handle_nic_owner,
1014 			sizeof(struct iwl_sap_msg_dw));
1015 	SAP_MSG_HANDLER(CSME_CAN_RELEASE_OWNERSHIP,
1016 			iwl_mei_handle_can_release_ownership, 0);
1017 	SAP_MSG_HANDLER(CSME_TAKING_OWNERSHIP,
1018 			iwl_mei_handle_csme_taking_ownership, 0);
1019 	SAP_MSG_HANDLER(PLDR_ACK, iwl_mei_handle_pldr_ack,
1020 			sizeof(struct iwl_sap_pldr_ack_data));
1021 	default:
1022 		/*
1023 		 * This is not really an error; there are messages that we
1024 		 * decided to ignore, yet it is useful to be able to leave a
1025 		 * note if debug is enabled.
1026 		 */
1027 		dev_dbg(&cldev->dev, "Unsupported message: type %d, len %d\n",
1028 			le16_to_cpu(hdr->type), len);
1029 	}
1030 
1031 #undef SAP_MSG_HANDLER
1032 #undef SAP_MSG_HANDLER_NO_LOCK
1033 }
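/*
 * For illustration, SAP_MSG_HANDLER(PING, iwl_mei_handle_ping, 0) above
 * expands to roughly:
 *
 *	case SAP_MSG_NOTIF_PING:
 *		if (len < 0) { ... bad size, break ... }
 *		mutex_lock(&iwl_mei_mutex);
 *		iwl_mei_handle_ping(cldev, (const void *)hdr);
 *		mutex_unlock(&iwl_mei_mutex);
 *		break;
 */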
1034 
1035 static void iwl_mei_read_from_q(const u8 *q_head, u32 q_sz,
1036 				u32 *_rd, u32 wr,
1037 				void *_buf, u32 len)
1038 {
1039 	u8 *buf = _buf;
1040 	u32 rd = *_rd;
1041 
1042 	if (rd + len <= q_sz) {
1043 		memcpy(buf, q_head + rd, len);
1044 		rd += len;
1045 	} else {
1046 		memcpy(buf, q_head + rd, q_sz - rd);
1047 		memcpy(buf + q_sz - rd, q_head, len - (q_sz - rd));
1048 		rd = len - (q_sz - rd);
1049 	}
1050 
1051 	*_rd = rd;
1052 }
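/*
 * Wrap-around example: with q_sz = 16 and rd = 14, reading len = 4 copies
 * 2 bytes from the tail (offsets 14..15) and 2 bytes from the head
 * (offsets 0..1), and leaves rd = 2.
 */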
1053 
1054 #define QOS_HDR_IV_SNAP_LEN (sizeof(struct ieee80211_qos_hdr) +      \
1055 			     IEEE80211_TKIP_IV_LEN +                 \
1056 			     sizeof(rfc1042_header) + ETH_TLEN)
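/*
 * This adds up to 42 bytes of headroom: 26 (QoS header) + 8 (TKIP IV) +
 * 6 (RFC 1042 SNAP header) + 2 (EtherType), i.e. enough room to later
 * rebuild an 802.11 frame in front of the payload.
 */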
1057 
1058 static void iwl_mei_handle_sap_data(struct mei_cl_device *cldev,
1059 				    const u8 *q_head, u32 q_sz,
1060 				    u32 rd, u32 wr, ssize_t valid_rx_sz,
1061 				    struct sk_buff_head *tx_skbs)
1062 {
1063 	struct iwl_sap_hdr hdr;
1064 	struct net_device *netdev =
1065 		rcu_dereference_protected(iwl_mei_cache.netdev,
1066 					  lockdep_is_held(&iwl_mei_mutex));
1067 
1068 	if (!netdev)
1069 		return;
1070 
1071 	while (valid_rx_sz >= sizeof(hdr)) {
1072 		struct ethhdr *ethhdr;
1073 		unsigned char *data;
1074 		struct sk_buff *skb;
1075 		u16 len;
1076 
1077 		iwl_mei_read_from_q(q_head, q_sz, &rd, wr, &hdr, sizeof(hdr));
1078 		valid_rx_sz -= sizeof(hdr);
1079 		len = le16_to_cpu(hdr.len);
1080 
1081 		if (valid_rx_sz < len) {
1082 			dev_err(&cldev->dev,
1083 				"Data queue is corrupted: valid data len %zd, len %d\n",
1084 				valid_rx_sz, len);
1085 			break;
1086 		}
1087 
1088 		if (len < sizeof(*ethhdr)) {
1089 			dev_err(&cldev->dev,
1090 				"Data len is smaller than an ethernet header? len = %d\n", len);
1091 			break; /* corrupted queue: avoid the u16 underflow below */
1092 		}
1093 
1094 		valid_rx_sz -= len;
1095 
1096 		if (le16_to_cpu(hdr.type) != SAP_MSG_DATA_PACKET) {
1097 			dev_err(&cldev->dev, "Unsupported Rx data: type %d, len %d\n",
1098 				le16_to_cpu(hdr.type), len);
1099 			continue;
1100 		}
1101 
1102 		/* We need enough room for the WiFi header + SNAP + IV */
1103 		skb = netdev_alloc_skb(netdev, len + QOS_HDR_IV_SNAP_LEN);
1104 		if (!skb)
1105 			continue;
1106 
1107 		skb_reserve(skb, QOS_HDR_IV_SNAP_LEN);
1108 		ethhdr = skb_push(skb, sizeof(*ethhdr));
1109 
1110 		iwl_mei_read_from_q(q_head, q_sz, &rd, wr,
1111 				    ethhdr, sizeof(*ethhdr));
1112 		len -= sizeof(*ethhdr);
1113 
1114 		skb_reset_mac_header(skb);
1115 		skb_reset_network_header(skb);
1116 		skb->protocol = ethhdr->h_proto;
1117 
1118 		data = skb_put(skb, len);
1119 		iwl_mei_read_from_q(q_head, q_sz, &rd, wr, data, len);
1120 
1121 		/*
1122 		 * Enqueue the skb here so that it can be sent later when we
1123 		 * do not hold the mutex. TX'ing a packet with the mutex held is
1124 		 * possible, but it would forbid the TX path from calling any of
1125 		 * iwlmei's functions, since every API from iwlmei needs the
1126 		 * mutex.
1127 		 */
1128 		__skb_queue_tail(tx_skbs, skb);
1129 	}
1130 }
1131 
1132 static void iwl_mei_handle_sap_rx_cmd(struct mei_cl_device *cldev,
1133 				      const u8 *q_head, u32 q_sz,
1134 				      u32 rd, u32 wr, ssize_t valid_rx_sz)
1135 {
1136 	struct page *p = alloc_page(GFP_KERNEL);
1137 	struct iwl_sap_hdr *hdr;
1138 
1139 	if (!p)
1140 		return;
1141 
1142 	hdr = page_address(p);
1143 
1144 	while (valid_rx_sz >= sizeof(*hdr)) {
1145 		u16 len;
1146 
1147 		iwl_mei_read_from_q(q_head, q_sz, &rd, wr, hdr, sizeof(*hdr));
1148 		valid_rx_sz -= sizeof(*hdr);
1149 		len = le16_to_cpu(hdr->len);
1150 
1151 		if (valid_rx_sz < len)
1152 			break;
1153 
1154 		iwl_mei_read_from_q(q_head, q_sz, &rd, wr, hdr + 1, len);
1155 
1156 		trace_iwlmei_sap_cmd(hdr, false);
1157 		iwl_mei_handle_sap_msg(cldev, hdr);
1158 		valid_rx_sz -= len;
1159 	}
1160 
1161 	/* valid_rx_sz must be 0 now... */
1162 	if (valid_rx_sz)
1163 		dev_err(&cldev->dev,
1164 			"More data in the buffer although we read it all\n");
1165 
1166 	__free_page(p);
1167 }
1168 
1169 static void iwl_mei_handle_sap_rx(struct mei_cl_device *cldev,
1170 				  struct iwl_sap_q_ctrl_blk *notif_q,
1171 				  const u8 *q_head,
1172 				  struct sk_buff_head *skbs,
1173 				  u32 q_sz)
1174 {
1175 	u32 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
1176 	u32 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
1177 	ssize_t valid_rx_sz;
1178 
1179 	if (rd > q_sz || wr > q_sz) {
1180 		dev_err(&cldev->dev,
1181 			"Pointers are past the buffer limit\n");
1182 		return;
1183 	}
1184 
1185 	if (rd == wr)
1186 		return;
1187 
1188 	valid_rx_sz = wr > rd ? wr - rd : q_sz - rd + wr;
1189 
1190 	if (skbs)
1191 		iwl_mei_handle_sap_data(cldev, q_head, q_sz, rd, wr,
1192 					valid_rx_sz, skbs);
1193 	else
1194 		iwl_mei_handle_sap_rx_cmd(cldev, q_head, q_sz, rd, wr,
1195 					  valid_rx_sz);
1196 
1197 	/* Increment the read pointer to point to the write pointer */
1198 	WRITE_ONCE(notif_q->rd_ptr, cpu_to_le32(wr));
1199 }
1200 
1201 static void iwl_mei_handle_check_shared_area(struct mei_cl_device *cldev)
1202 {
1203 	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
1204 	struct iwl_sap_q_ctrl_blk *notif_q;
1205 	struct sk_buff_head tx_skbs;
1206 	struct iwl_sap_dir *dir;
1207 	void *q_head;
1208 	u32 q_sz;
1209 
1210 	if (!mei->shared_mem.ctrl)
1211 		return;
1212 
1213 	dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_ME_TO_HOST];
1214 	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
1215 	q_head = mei->shared_mem.q_head[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_NOTIF];
1216 	q_sz = mei->shared_mem.q_size[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_NOTIF];
1217 
1218 	/*
1219 	 * Do not hold the mutex here; rather, each and every message
1220 	 * handler takes it itself.
1221 	 * This allows a handler to take it exactly where it is needed.
1222 	 */
1223 	iwl_mei_handle_sap_rx(cldev, notif_q, q_head, NULL, q_sz);
1224 
1225 	mutex_lock(&iwl_mei_mutex);
1226 	dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_ME_TO_HOST];
1227 	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];
1228 	q_head = mei->shared_mem.q_head[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_DATA];
1229 	q_sz = mei->shared_mem.q_size[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_DATA];
1230 
1231 	__skb_queue_head_init(&tx_skbs);
1232 
1233 	iwl_mei_handle_sap_rx(cldev, notif_q, q_head, &tx_skbs, q_sz);
1234 
1235 	if (skb_queue_empty(&tx_skbs)) {
1236 		mutex_unlock(&iwl_mei_mutex);
1237 		return;
1238 	}
1239 
1240 	/*
1241 	 * Take the RCU read lock before we unlock the mutex to make sure that
1242 	 * even if the netdev is replaced by another non-NULL netdev right after
1243 	 * we unlock the mutex, the old netdev will still be valid when we
1244 	 * transmit the frames. We can't allow the netdev to be replaced here because
1245 	 * the skbs hold a pointer to the netdev.
1246 	 */
1247 	rcu_read_lock();
1248 
1249 	mutex_unlock(&iwl_mei_mutex);
1250 
1251 	if (!rcu_access_pointer(iwl_mei_cache.netdev)) {
1252 		dev_err(&cldev->dev, "Can't Tx without a netdev\n");
1253 		skb_queue_purge(&tx_skbs);
1254 		goto out;
1255 	}
1256 
1257 	while (!skb_queue_empty(&tx_skbs)) {
1258 		struct sk_buff *skb = __skb_dequeue(&tx_skbs);
1259 
1260 		trace_iwlmei_sap_data(skb, IWL_SAP_RX_DATA_TO_AIR);
1261 		dev_queue_xmit(skb);
1262 	}
1263 
1264 out:
1265 	rcu_read_unlock();
1266 }
1267 
1268 static void iwl_mei_rx(struct mei_cl_device *cldev)
1269 {
1270 	struct iwl_sap_me_msg_hdr *hdr;
1271 	u8 msg[100];
1272 	ssize_t ret;
1273 
1274 	ret = mei_cldev_recv(cldev, (u8 *)&msg, sizeof(msg));
1275 	if (ret < 0) {
1276 		dev_err(&cldev->dev, "failed to receive data: %zd\n", ret);
1277 		return;
1278 	}
1279 
1280 	if (ret == 0) {
1281 		dev_err(&cldev->dev, "got an empty response\n");
1282 		return;
1283 	}
1284 
1285 	hdr = (void *)msg;
1286 	trace_iwlmei_me_msg(hdr, false);
1287 
1288 	switch (le32_to_cpu(hdr->type)) {
1289 	case SAP_ME_MSG_START_OK:
1290 		BUILD_BUG_ON(sizeof(struct iwl_sap_me_msg_start_ok) >
1291 			     sizeof(msg));
1292 
1293 		iwl_mei_handle_rx_start_ok(cldev, (void *)msg, ret);
1294 		break;
1295 	case SAP_ME_MSG_CHECK_SHARED_AREA:
1296 		iwl_mei_handle_check_shared_area(cldev);
1297 		break;
1298 	default:
1299 		dev_err(&cldev->dev, "got an unexpected RX notification: %d\n",
1300 			le32_to_cpu(hdr->type));
1301 		break;
1302 	}
1303 }
1304 
1305 static int iwl_mei_send_start(struct mei_cl_device *cldev)
1306 {
1307 	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
1308 	struct iwl_sap_me_msg_start msg = {
1309 		.hdr.type = cpu_to_le32(SAP_ME_MSG_START),
1310 		.hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)),
1311 		.hdr.len = cpu_to_le32(sizeof(msg)),
1312 		.supported_versions[0] = iwl_mei_cache.sap_version,
1313 		.init_data_seq_num = cpu_to_le16(0x100),
1314 		.init_notif_seq_num = cpu_to_le16(0x800),
1315 	};
1316 	int ret;
1317 
1318 	trace_iwlmei_me_msg(&msg.hdr, true);
1319 	ret = mei_cldev_send(cldev, (void *)&msg, sizeof(msg));
1320 	if (ret != sizeof(msg)) {
1321 		dev_err(&cldev->dev,
1322 			"failed to send the SAP_ME_MSG_START message %d\n",
1323 			ret);
1324 		return ret;
1325 	}
1326 
1327 	return 0;
1328 }
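/*
 * Sketch of the resulting handshake, as implemented here and in
 * iwl_mei_rx():
 *	host -> CSME: SAP_ME_MSG_START (supported versions, initial
 *		      sequence numbers)
 *	CSME -> host: SAP_ME_MSG_START_OK (chosen version)
 * Only once START_OK is validated does iwl_mei_handle_rx_start_ok() set
 * IWL_MEI_STATUS_SAP_CONNECTED.
 */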
1329 
1330 static int iwl_mei_enable(struct mei_cl_device *cldev)
1331 {
1332 	int ret;
1333 
1334 	ret = mei_cldev_enable(cldev);
1335 	if (ret < 0) {
1336 		dev_err(&cldev->dev, "failed to enable the device: %d\n", ret);
1337 		return ret;
1338 	}
1339 
1340 	ret = mei_cldev_register_rx_cb(cldev, iwl_mei_rx);
1341 	if (ret) {
1342 		dev_err(&cldev->dev,
1343 			"failed to register to the rx cb: %d\n", ret);
1344 		mei_cldev_disable(cldev);
1345 		return ret;
1346 	}
1347 
1348 	return 0;
1349 }
1350 
1351 struct iwl_mei_nvm *iwl_mei_get_nvm(void)
1352 {
1353 	struct iwl_mei_nvm *nvm = NULL;
1354 	struct iwl_mei *mei;
1355 	int ret;
1356 
1357 	mutex_lock(&iwl_mei_mutex);
1358 
1359 	if (!iwl_mei_is_connected())
1360 		goto out;
1361 
1362 	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1363 
1364 	if (!mei)
1365 		goto out;
1366 
1367 	ret = iwl_mei_send_sap_msg(iwl_mei_global_cldev,
1368 				   SAP_MSG_NOTIF_GET_NVM);
1369 	if (ret)
1370 		goto out;
1371 
1372 	mutex_unlock(&iwl_mei_mutex);
1373 
1374 	ret = wait_event_timeout(mei->get_nvm_wq, mei->nvm, 2 * HZ);
1375 	if (!ret)
1376 		return NULL;
1377 
1378 	mutex_lock(&iwl_mei_mutex);
1379 
1380 	if (!iwl_mei_is_connected())
1381 		goto out;
1382 
1383 	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1384 
1385 	if (!mei)
1386 		goto out;
1387 
1388 	if (mei->nvm)
1389 		nvm = kmemdup(mei->nvm, sizeof(*mei->nvm), GFP_KERNEL);
1390 
1391 out:
1392 	mutex_unlock(&iwl_mei_mutex);
1393 	return nvm;
1394 }
1395 EXPORT_SYMBOL_GPL(iwl_mei_get_nvm);
1396 
1397 #define IWL_MEI_PLDR_NUM_RETRIES	3
1398 
1399 int iwl_mei_pldr_req(void)
1400 {
1401 	struct iwl_mei *mei;
1402 	int ret;
1403 	struct iwl_sap_pldr_data msg = {
1404 		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_PLDR),
1405 		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1406 	};
1407 	int i;
1408 
1409 	mutex_lock(&iwl_mei_mutex);
1410 
1411 	/* In case we didn't have a binding */
1412 	if (!iwl_mei_is_connected()) {
1413 		ret = 0;
1414 		goto out;
1415 	}
1416 
1417 	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1418 
1419 	if (!mei) {
1420 		ret = -ENODEV;
1421 		goto out;
1422 	}
1423 
1424 	if (!mei->amt_enabled) {
1425 		ret = 0;
1426 		goto out;
1427 	}
1428 
1429 	for (i = 0; i < IWL_MEI_PLDR_NUM_RETRIES; i++) {
1430 		ret = iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
1431 		mutex_unlock(&iwl_mei_mutex);
1432 		if (ret)
1433 			return ret;
1434 
1435 		ret = wait_event_timeout(mei->pldr_wq, mei->pldr_active, HZ / 2);
1436 		if (ret)
1437 			break;
1438 
1439 		/* Take the mutex for the next iteration */
1440 		mutex_lock(&iwl_mei_mutex);
1441 	}
1442 
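	/* Non-zero ret means wait_event_timeout() saw the PLDR ack in time */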
1443 	if (ret)
1444 		return 0;
1445 
1446 	ret = -ETIMEDOUT;
1447 out:
1448 	mutex_unlock(&iwl_mei_mutex);
1449 	return ret;
1450 }
1451 EXPORT_SYMBOL_GPL(iwl_mei_pldr_req);
1452 
1453 int iwl_mei_get_ownership(void)
1454 {
1455 	struct iwl_mei *mei;
1456 	int ret;
1457 
1458 	mutex_lock(&iwl_mei_mutex);
1459 
1460 	/* In case we didn't have a binding */
1461 	if (!iwl_mei_is_connected()) {
1462 		ret = 0;
1463 		goto out;
1464 	}
1465 
1466 	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1467 
1468 	if (!mei) {
1469 		ret = -ENODEV;
1470 		goto out;
1471 	}
1472 
1473 	if (!mei->amt_enabled) {
1474 		ret = 0;
1475 		goto out;
1476 	}
1477 
1478 	if (mei->got_ownership) {
1479 		ret = 0;
1480 		goto out;
1481 	}
1482 
1483 	ret = iwl_mei_send_sap_msg(mei->cldev,
1484 				   SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP);
1485 	if (ret)
1486 		goto out;
1487 
1488 	mutex_unlock(&iwl_mei_mutex);
1489 
1490 	ret = wait_event_timeout(mei->get_ownership_wq,
1491 				 mei->got_ownership, HZ / 2);
1492 	if (!ret) {
1493 		schedule_delayed_work(&mei->ownership_dwork,
1494 				      MEI_OWNERSHIP_RETAKE_TIMEOUT_MS);
1495 		return -ETIMEDOUT;
1496 	}
1497 
1498 	return 0;
1499 out:
1500 	mutex_unlock(&iwl_mei_mutex);
1501 	return ret;
1502 }
1503 EXPORT_SYMBOL_GPL(iwl_mei_get_ownership);
1504 
1505 void iwl_mei_alive_notif(bool success)
1506 {
1507 	struct iwl_mei *mei;
1508 	struct iwl_sap_pldr_end_data msg = {
1509 		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_PLDR_END),
1510 		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1511 		.status = success ? cpu_to_le32(SAP_PLDR_STATUS_SUCCESS) :
1512 			cpu_to_le32(SAP_PLDR_STATUS_FAILURE),
1513 	};
1514 
1515 	mutex_lock(&iwl_mei_mutex);
1516 
1517 	if (!iwl_mei_is_connected())
1518 		goto out;
1519 
1520 	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1521 	if (!mei || !mei->pldr_active)
1522 		goto out;
1523 
1524 	mei->pldr_active = false;
1525 
1526 	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
1527 out:
1528 	mutex_unlock(&iwl_mei_mutex);
1529 }
1530 EXPORT_SYMBOL_GPL(iwl_mei_alive_notif);
1531 
1532 void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info,
1533 			     const struct iwl_mei_colloc_info *colloc_info)
1534 {
1535 	struct iwl_sap_notif_host_link_up msg = {
1536 		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_UP),
1537 		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1538 		.conn_info = {
1539 			.ssid_len = cpu_to_le32(conn_info->ssid_len),
1540 			.channel = conn_info->channel,
1541 			.band = conn_info->band,
1542 			.pairwise_cipher = cpu_to_le32(conn_info->pairwise_cipher),
1543 			.auth_mode = cpu_to_le32(conn_info->auth_mode),
1544 		},
1545 	};
1546 	struct iwl_mei *mei;
1547 
1548 	if (conn_info->ssid_len > ARRAY_SIZE(msg.conn_info.ssid))
1549 		return;
1550 
1551 	memcpy(msg.conn_info.ssid, conn_info->ssid, conn_info->ssid_len);
1552 	memcpy(msg.conn_info.bssid, conn_info->bssid, ETH_ALEN);
1553 
1554 	if (colloc_info) {
1555 		msg.colloc_channel = colloc_info->channel;
1556 		msg.colloc_band = colloc_info->channel <= 14 ? 0 : 1;
1557 		memcpy(msg.colloc_bssid, colloc_info->bssid, ETH_ALEN);
1558 	}
1559 
1560 	mutex_lock(&iwl_mei_mutex);
1561 
1562 	if (!iwl_mei_is_connected())
1563 		goto out;
1564 
1565 	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1566 
1567 	if (!mei || !mei->amt_enabled)
1568 		goto out;
1569 
1570 	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
1571 
1572 out:
1573 	kfree(iwl_mei_cache.conn_info);
1574 	iwl_mei_cache.conn_info =
1575 		kmemdup(&msg.conn_info, sizeof(msg.conn_info), GFP_KERNEL);
1576 	mutex_unlock(&iwl_mei_mutex);
1577 }
1578 EXPORT_SYMBOL_GPL(iwl_mei_host_associated);
1579 
1580 void iwl_mei_host_disassociated(void)
1581 {
1582 	struct iwl_mei *mei;
1583 	struct iwl_sap_notif_host_link_down msg = {
1584 		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_DOWN),
1585 		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1586 		.type = HOST_LINK_DOWN_TYPE_TEMPORARY,
1587 	};
1588 
1589 	mutex_lock(&iwl_mei_mutex);
1590 
1591 	if (!iwl_mei_is_connected())
1592 		goto out;
1593 
1594 	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1595 
1596 	if (!mei || !mei->amt_enabled)
1597 		goto out;
1598 
1599 	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
1600 
1601 out:
1602 	kfree(iwl_mei_cache.conn_info);
1603 	iwl_mei_cache.conn_info = NULL;
1604 	mutex_unlock(&iwl_mei_mutex);
1605 }
1606 EXPORT_SYMBOL_GPL(iwl_mei_host_disassociated);
1607 
1608 void iwl_mei_set_rfkill_state(bool hw_rfkill, bool sw_rfkill)
1609 {
1610 	struct iwl_mei *mei;
1611 	u32 rfkill_state = 0;
1612 	struct iwl_sap_msg_dw msg = {
1613 		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_RADIO_STATE),
1614 		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1615 	};
1616 
1617 	if (!sw_rfkill)
1618 		rfkill_state |= SAP_SW_RFKILL_DEASSERTED;
1619 
1620 	if (!hw_rfkill)
1621 		rfkill_state |= SAP_HW_RFKILL_DEASSERTED;
1622 
1623 	mutex_lock(&iwl_mei_mutex);
1624 
1625 	if (!iwl_mei_is_connected())
1626 		goto out;
1627 
1628 	msg.val = cpu_to_le32(rfkill_state);
1629 
1630 	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1631 
1632 	if (!mei || !mei->amt_enabled)
1633 		goto out;
1634 
1635 	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
1636 
1637 out:
1638 	iwl_mei_cache.rf_kill = rfkill_state;
1639 	mutex_unlock(&iwl_mei_mutex);
1640 }
1641 EXPORT_SYMBOL_GPL(iwl_mei_set_rfkill_state);
1642 
1643 void iwl_mei_set_nic_info(const u8 *mac_address, const u8 *nvm_address)
1644 {
1645 	struct iwl_mei *mei;
1646 	struct iwl_sap_notif_host_nic_info msg = {
1647 		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_NIC_INFO),
1648 		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1649 	};
1650 
1651 	mutex_lock(&iwl_mei_mutex);
1652 
1653 	if (!iwl_mei_is_connected())
1654 		goto out;
1655 
1656 	ether_addr_copy(msg.mac_address, mac_address);
1657 	ether_addr_copy(msg.nvm_address, nvm_address);
1658 
1659 	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1660 
1661 	if (!mei || !mei->amt_enabled)
1662 		goto out;
1663 
1664 	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
1665 
1666 out:
1667 	ether_addr_copy(iwl_mei_cache.mac_address, mac_address);
1668 	ether_addr_copy(iwl_mei_cache.nvm_address, nvm_address);
1669 	mutex_unlock(&iwl_mei_mutex);
1670 }
1671 EXPORT_SYMBOL_GPL(iwl_mei_set_nic_info);
1672 
1673 void iwl_mei_set_country_code(u16 mcc)
1674 {
1675 	struct iwl_mei *mei;
1676 	struct iwl_sap_notif_country_code msg = {
1677 		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_COUNTRY_CODE),
1678 		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1679 		.mcc = cpu_to_le16(mcc),
1680 	};
1681 
1682 	mutex_lock(&iwl_mei_mutex);
1683 
1684 	if (!iwl_mei_is_connected())
1685 		goto out;
1686 
1687 	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1688 
1689 	if (!mei || !mei->amt_enabled)
1690 		goto out;
1691 
1692 	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
1693 
1694 out:
1695 	iwl_mei_cache.mcc = mcc;
1696 	mutex_unlock(&iwl_mei_mutex);
1697 }
1698 EXPORT_SYMBOL_GPL(iwl_mei_set_country_code);
1699 
1700 void iwl_mei_set_power_limit(const __le16 *power_limit)
1701 {
1702 	struct iwl_mei *mei;
1703 	struct iwl_sap_notif_sar_limits msg = {
1704 		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_SAR_LIMITS),
1705 		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1706 	};
1707 
1708 	mutex_lock(&iwl_mei_mutex);
1709 
1710 	if (!iwl_mei_is_connected())
1711 		goto out;
1712 
1713 	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1714 
1715 	if (!mei || !mei->amt_enabled)
1716 		goto out;
1717 
1718 	memcpy(msg.sar_chain_info_table, power_limit, sizeof(msg.sar_chain_info_table));
1719 
1720 	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
1721 
1722 out:
1723 	kfree(iwl_mei_cache.power_limit);
1724 	iwl_mei_cache.power_limit = kmemdup(power_limit,
1725 					    sizeof(msg.sar_chain_info_table), GFP_KERNEL);
1726 	mutex_unlock(&iwl_mei_mutex);
1727 }
1728 EXPORT_SYMBOL_GPL(iwl_mei_set_power_limit);
1729 
1730 void iwl_mei_set_netdev(struct net_device *netdev)
1731 {
1732 	struct iwl_mei *mei;
1733 
1734 	mutex_lock(&iwl_mei_mutex);
1735 
1736 	if (!iwl_mei_is_connected()) {
1737 		rcu_assign_pointer(iwl_mei_cache.netdev, netdev);
1738 		goto out;
1739 	}
1740 
1741 	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1742 
1743 	if (!mei)
1744 		goto out;
1745 
1746 	if (!netdev) {
1747 		struct net_device *dev =
1748 			rcu_dereference_protected(iwl_mei_cache.netdev,
1749 						  lockdep_is_held(&iwl_mei_mutex));
1750 
1751 		if (!dev)
1752 			goto out;
1753 
1754 		netdev_rx_handler_unregister(dev);
1755 	}
1756 
1757 	rcu_assign_pointer(iwl_mei_cache.netdev, netdev);
1758 
1759 	if (netdev && mei->amt_enabled)
1760 		netdev_rx_handler_register(netdev, iwl_mei_rx_handler, mei);
1761 
1762 out:
1763 	mutex_unlock(&iwl_mei_mutex);
1764 }
1765 EXPORT_SYMBOL_GPL(iwl_mei_set_netdev);
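
/*
 * Usage sketch: the Wi-Fi driver hands its netdev to iwlmei when the
 * interface comes up, which lets iwlmei register its Rx handler for CSME
 * traffic filtering, and clears it before the netdev goes away:
 *
 *	iwl_mei_set_netdev(ndev);	(on interface up)
 *	...
 *	iwl_mei_set_netdev(NULL);	(before unregister_netdev())
 */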
1766 
1767 void iwl_mei_device_state(bool up)
1768 {
1769 	struct iwl_mei *mei;
1770 
1771 	mutex_lock(&iwl_mei_mutex);
1772 
1773 	if (!iwl_mei_is_connected())
1774 		goto out;
1775 
1776 	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1777 
1778 	if (!mei)
1779 		goto out;
1780 
1781 	mei->device_down = !up;
1782 
1783 	if (up || !mei->csme_taking_ownership)
1784 		goto out;
1785 
1786 	iwl_mei_send_sap_msg(mei->cldev,
1787 			     SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED);
1788 	mei->csme_taking_ownership = false;
1789 	schedule_delayed_work(&mei->ownership_dwork,
1790 			      MEI_OWNERSHIP_RETAKE_TIMEOUT_MS);
1791 out:
1792 	mutex_unlock(&iwl_mei_mutex);
1793 }
1794 EXPORT_SYMBOL_GPL(iwl_mei_device_state);
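
/*
 * Usage sketch: bracketing the device state lets a pending CSME ownership
 * takeover be confirmed only once the device is really down:
 *
 *	iwl_mei_device_state(true);	(mac/device started)
 *	...
 *	iwl_mei_device_state(false);	(device stopped; this may send
 *					 SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED)
 */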
1795 
1796 int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops)
1797 {
1798 	int ret;
1799 
1800 	/*
1801 	 * We must have a non-NULL priv pointer so that we don't crash when
1802 	 * there are multiple WiFi devices.
1803 	 */
1804 	if (!priv)
1805 		return -EINVAL;
1806 
1807 	mutex_lock(&iwl_mei_mutex);
1808 
1809 	/* do not allow registration if someone else already registered */
1810 	if (iwl_mei_cache.priv || iwl_mei_cache.ops) {
1811 		ret = -EBUSY;
1812 		goto out;
1813 	}
1814 
1815 	iwl_mei_cache.priv = priv;
1816 	iwl_mei_cache.ops = ops;
1817 
1818 	if (iwl_mei_global_cldev) {
1819 		struct iwl_mei *mei =
1820 			mei_cldev_get_drvdata(iwl_mei_global_cldev);
1821 
1822 		/* we already have a SAP connection */
1823 		if (iwl_mei_is_connected()) {
1824 			if (mei->amt_enabled)
1825 				iwl_mei_send_sap_msg(mei->cldev,
1826 						     SAP_MSG_NOTIF_WIFIDR_UP);
1827 			ops->rfkill(priv, mei->link_prot_state, false);
1828 		}
1829 	}
1830 	ret = 0;
1831 
1832 out:
1833 	mutex_unlock(&iwl_mei_mutex);
1834 	return ret;
1835 }
1836 EXPORT_SYMBOL_GPL(iwl_mei_register);
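
/*
 * Registration sketch (hypothetical caller): only one Wi-Fi device can be
 * registered at a time and priv must be non-NULL. Only the two callbacks
 * invoked in this file are shown; their parameter names are assumptions
 * based on the calls above, and the real struct iwl_mei_ops in iwl-mei.h
 * has more members:
 *
 *	static void ex_rfkill(void *priv, bool blocked, bool csme_taking)
 *	{ (react to CSME link protection / rfkill changes) }
 *
 *	static void ex_nic_stolen(void *priv)
 *	{ (CSME took the NIC; stop using the device) }
 *
 *	static const struct iwl_mei_ops ex_ops = {
 *		.rfkill		= ex_rfkill,
 *		.nic_stolen	= ex_nic_stolen,
 *	};
 *
 *	err = iwl_mei_register(drv_priv, &ex_ops);
 *	(-EINVAL if drv_priv is NULL, -EBUSY if already registered)
 */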
1837 
1838 void iwl_mei_start_unregister(void)
1839 {
1840 	mutex_lock(&iwl_mei_mutex);
1841 
1842 	/* At this point, the wifi driver should have removed the netdev */
1843 	if (rcu_access_pointer(iwl_mei_cache.netdev))
1844 		pr_err("Still had a netdev pointer set upon unregister\n");
1845 
1846 	kfree(iwl_mei_cache.conn_info);
1847 	iwl_mei_cache.conn_info = NULL;
1848 	kfree(iwl_mei_cache.power_limit);
1849 	iwl_mei_cache.power_limit = NULL;
1850 	iwl_mei_cache.ops = NULL;
1851 	/* leave iwl_mei_cache.priv non-NULL to prevent any new registration */
1852 
1853 	mutex_unlock(&iwl_mei_mutex);
1854 }
1855 EXPORT_SYMBOL_GPL(iwl_mei_start_unregister);
1856 
1857 void iwl_mei_unregister_complete(void)
1858 {
1859 	mutex_lock(&iwl_mei_mutex);
1860 
1861 	iwl_mei_cache.priv = NULL;
1862 
1863 	if (iwl_mei_global_cldev) {
1864 		struct iwl_mei *mei =
1865 			mei_cldev_get_drvdata(iwl_mei_global_cldev);
1866 
1867 		if (mei->amt_enabled)
1868 			iwl_mei_send_sap_msg(mei->cldev,
1869 					     SAP_MSG_NOTIF_WIFIDR_DOWN);
1870 		mei->got_ownership = false;
1871 	}
1872 
1873 	mutex_unlock(&iwl_mei_mutex);
1874 }
1875 EXPORT_SYMBOL_GPL(iwl_mei_unregister_complete);
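
/*
 * Unregistration sketch: tearing down is a two-step sequence so that new
 * registrations stay blocked while the Wi-Fi driver shuts down:
 *
 *	iwl_mei_set_netdev(NULL);	(the netdev must be gone first)
 *	iwl_mei_start_unregister();	(frees cached state, keeps priv set)
 *	(Wi-Fi driver teardown)
 *	iwl_mei_unregister_complete();	(clears priv, sends WIFIDR_DOWN)
 */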
1876 
1877 #if IS_ENABLED(CONFIG_DEBUG_FS)
1878 
1879 static ssize_t
1880 iwl_mei_dbgfs_send_start_message_write(struct file *file,
1881 				       const char __user *user_buf,
1882 				       size_t count, loff_t *ppos)
1883 {
1884 	int ret;
1885 
1886 	mutex_lock(&iwl_mei_mutex);
1887 
1888 	if (!iwl_mei_global_cldev) {
1889 		ret = -ENODEV;
1890 		goto out;
1891 	}
1892 
1893 	ret = iwl_mei_send_start(iwl_mei_global_cldev);
1894 
1895 out:
1896 	mutex_unlock(&iwl_mei_mutex);
1897 	return ret ?: count;
1898 }
1899 
1900 static const struct file_operations iwl_mei_dbgfs_send_start_message_ops = {
1901 	.write = iwl_mei_dbgfs_send_start_message_write,
1902 	.open = simple_open,
1903 	.llseek = default_llseek,
1904 };
1905 
1906 static ssize_t iwl_mei_dbgfs_req_ownership_write(struct file *file,
1907 						 const char __user *user_buf,
1908 						 size_t count, loff_t *ppos)
1909 {
1910 	iwl_mei_get_ownership();
1911 
1912 	return count;
1913 }
1914 
1915 static const struct file_operations iwl_mei_dbgfs_req_ownership_ops = {
1916 	.write = iwl_mei_dbgfs_req_ownership_write,
1917 	.open = simple_open,
1918 	.llseek = default_llseek,
1919 };
1920 
1921 static void iwl_mei_dbgfs_register(struct iwl_mei *mei)
1922 {
1923 	mei->dbgfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
1924 
1925 	if (!mei->dbgfs_dir)
1926 		return;
1927 
1928 	debugfs_create_ulong("status", S_IRUSR,
1929 			     mei->dbgfs_dir, &iwl_mei_status);
1930 	debugfs_create_file("send_start_message", S_IWUSR, mei->dbgfs_dir,
1931 			    mei, &iwl_mei_dbgfs_send_start_message_ops);
1932 	debugfs_create_file("req_ownership", S_IWUSR, mei->dbgfs_dir,
1933 			    mei, &iwl_mei_dbgfs_req_ownership_ops);
1934 }
1935 
1936 static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei)
1937 {
1938 	debugfs_remove_recursive(mei->dbgfs_dir);
1939 	mei->dbgfs_dir = NULL;
1940 }
1941 
1942 #else
1943 
1944 static void iwl_mei_dbgfs_register(struct iwl_mei *mei) {}
1945 static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {}
1946 
1947 #endif /* CONFIG_DEBUG_FS */
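
/*
 * Debugfs usage sketch, assuming debugfs is mounted at /sys/kernel/debug
 * and KBUILD_MODNAME expands to "iwlmei":
 *
 *	echo 1 > /sys/kernel/debug/iwlmei/send_start_message
 *	echo 1 > /sys/kernel/debug/iwlmei/req_ownership
 *	cat /sys/kernel/debug/iwlmei/status
 */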
1948 
1949 static void iwl_mei_ownership_dwork(struct work_struct *wk)
1950 {
1951 	iwl_mei_get_ownership();
1952 }
1953 
1954 #define ALLOC_SHARED_MEM_RETRY_MAX_NUM	3
1955 
1956 /*
1957  * iwl_mei_probe - the probe function called by the mei bus enumeration
1958  *
1959  * This allocates the data needed by iwlmei and stores a pointer to
1960  * this data in the mei_cl_device's drvdata.
1961  * It starts the SAP protocol by sending the SAP_ME_MSG_START without
1962  * waiting for the answer. The answer will be caught later by the Rx callback.
1963  */
1964 static int iwl_mei_probe(struct mei_cl_device *cldev,
1965 			 const struct mei_cl_device_id *id)
1966 {
1967 	int alloc_retry = ALLOC_SHARED_MEM_RETRY_MAX_NUM;
1968 	struct iwl_mei *mei;
1969 	int ret;
1970 
1971 	mei = devm_kzalloc(&cldev->dev, sizeof(*mei), GFP_KERNEL);
1972 	if (!mei)
1973 		return -ENOMEM;
1974 
1975 	init_waitqueue_head(&mei->get_nvm_wq);
1976 	INIT_WORK(&mei->send_csa_msg_wk, iwl_mei_send_csa_msg_wk);
1977 	INIT_DELAYED_WORK(&mei->csa_throttle_end_wk,
1978 			  iwl_mei_csa_throttle_end_wk);
1979 	init_waitqueue_head(&mei->get_ownership_wq);
1980 	init_waitqueue_head(&mei->pldr_wq);
1981 	spin_lock_init(&mei->data_q_lock);
1982 	INIT_WORK(&mei->netdev_work, iwl_mei_netdev_work);
1983 	INIT_DELAYED_WORK(&mei->ownership_dwork, iwl_mei_ownership_dwork);
1984 
1985 	mei_cldev_set_drvdata(cldev, mei);
1986 	mei->cldev = cldev;
1987 	mei->device_down = true;
1988 
1989 	do {
1990 		ret = iwl_mei_alloc_shared_mem(cldev);
1991 		if (!ret)
1992 			break;
1993 		/*
1994 		 * The CSME firmware needs to boot the internal WLAN client.
1995 		 * This can take time in certain configurations (usually
1996 		 * upon resume and when the whole CSME firmware is shut down
1997 		 * during suspend).
1998 		 *
1999 		 * Wait a bit before retrying and hope we'll succeed next time.
2000 		 */
2001 
2002 		dev_dbg(&cldev->dev,
2003 			"Couldn't allocate the shared memory: %d, attempt %d / %d\n",
2004 			ret, alloc_retry, ALLOC_SHARED_MEM_RETRY_MAX_NUM);
2005 		msleep(100);
2006 		alloc_retry--;
2007 	} while (alloc_retry);
2008 
2009 	if (ret) {
2010 		dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n",
2011 			ret);
2012 		goto free;
2013 	}
2014 
2015 	iwl_mei_init_shared_mem(mei);
2016 
2017 	ret = iwl_mei_enable(cldev);
2018 	if (ret)
2019 		goto free_shared_mem;
2020 
2021 	iwl_mei_dbgfs_register(mei);
2022 
2023 	/*
2024 	 * We now have an Rx function in place, so start the SAP protocol;
2025 	 * we expect to get the SAP_ME_MSG_START_OK response later on.
2026 	 */
2027 	mutex_lock(&iwl_mei_mutex);
2028 	ret = iwl_mei_send_start(cldev);
2029 	mutex_unlock(&iwl_mei_mutex);
2030 	if (ret)
2031 		goto debugfs_unregister;
2032 
2033 	/* must be last */
2034 	iwl_mei_global_cldev = cldev;
2035 
2036 	return 0;
2037 
2038 debugfs_unregister:
2039 	iwl_mei_dbgfs_unregister(mei);
2040 	mei_cldev_disable(cldev);
2041 free_shared_mem:
2042 	iwl_mei_free_shared_mem(cldev);
2043 free:
2044 	mei_cldev_set_drvdata(cldev, NULL);
2045 	devm_kfree(&cldev->dev, mei);
2046 
2047 	return ret;
2048 }
2049 
2050 #define SEND_SAP_MAX_WAIT_ITERATION 10
2051 #define IWLMEI_DEVICE_DOWN_WAIT_ITERATION 50
2052 
2053 static void iwl_mei_remove(struct mei_cl_device *cldev)
2054 {
2055 	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
2056 	int i;
2057 
2058 	/*
2059 	 * We are being removed while the bus is still active; this means
2060 	 * we are about to suspend or shut down, so the NIC will disappear.
2061 	 */
2062 	if (mei_cldev_enabled(cldev) && iwl_mei_cache.ops) {
2063 		unsigned int iter = IWLMEI_DEVICE_DOWN_WAIT_ITERATION;
2064 		bool down = false;
2065 
2066 		/*
2067 		 * In case of suspend, wait for the mac to stop and don't remove
2068 		 * the interface. This will allow the interface to come back
2069 		 * on resume.
2070 		 */
2071 		while (!down && iter--) {
2072 			mdelay(1);
2073 
2074 			mutex_lock(&iwl_mei_mutex);
2075 			down = mei->device_down;
2076 			mutex_unlock(&iwl_mei_mutex);
2077 		}
2078 
2079 		if (!down)
2080 			iwl_mei_cache.ops->nic_stolen(iwl_mei_cache.priv);
2081 	}
2082 
2083 	if (rcu_access_pointer(iwl_mei_cache.netdev)) {
2084 		struct net_device *dev;
2085 
2086 		/*
2087 		 * First take rtnl and only then the mutex to avoid an ABBA
2088 		 * with iwl_mei_set_netdev()
2089 		 */
2090 		rtnl_lock();
2091 		mutex_lock(&iwl_mei_mutex);
2092 
2093 		/*
2094 	 * If we are suspending and the wifi driver hasn't removed its netdev
2095 		 * yet, do it now. In any case, don't change the cache.netdev pointer.
2096 		 */
2097 		dev = rcu_dereference_protected(iwl_mei_cache.netdev,
2098 						lockdep_is_held(&iwl_mei_mutex));
2099 
2100 		netdev_rx_handler_unregister(dev);
2101 		mutex_unlock(&iwl_mei_mutex);
2102 		rtnl_unlock();
2103 	}
2104 
2105 	mutex_lock(&iwl_mei_mutex);
2106 
2107 	/* Tell CSME that we are going down so that it won't access the
2108 	 * memory anymore; make sure this message goes through immediately.
2109 	 */
2110 	mei->csa_throttled = false;
2111 	iwl_mei_send_sap_msg(mei->cldev,
2112 			     SAP_MSG_NOTIF_HOST_GOES_DOWN);
2113 
2114 	for (i = 0; i < SEND_SAP_MAX_WAIT_ITERATION; i++) {
2115 		if (!iwl_mei_host_to_me_data_pending(mei))
2116 			break;
2117 
2118 		msleep(20);
2119 	}
2120 
2121 	/* If we couldn't make sure that CSME saw the HOST_GOES_DOWN
2122 	 * message, it will probably keep reading memory that we are
2123 	 * about to unmap and free; expect IOMMU error messages in
2124 	 * that case.
2125 	 */
2126 	if (i == SEND_SAP_MAX_WAIT_ITERATION)
2127 		dev_err(&mei->cldev->dev,
2128 			"Couldn't get ACK from CSME on HOST_GOES_DOWN message\n");
2129 
2130 	mutex_unlock(&iwl_mei_mutex);
2131 
2132 	/*
2133 	 * This looks strange, but this lock is taken here to make sure that
2134 	 * iwl_mei_add_data_to_ring, called from the Tx path, sees that we
2135 	 * cleared the IWL_MEI_STATUS_SAP_CONNECTED bit.
2136 	 * Rx isn't a problem because the rx_handler can't be called after
2137 	 * having been unregistered.
2138 	 */
2139 	spin_lock_bh(&mei->data_q_lock);
2140 	clear_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
2141 	spin_unlock_bh(&mei->data_q_lock);
2142 
2143 	if (iwl_mei_cache.ops)
2144 		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false, false);
2145 
2146 	/*
2147 	 * mei_cldev_disable will return only after all the MEI Rx is done.
2148 	 * It must be called when iwl_mei_mutex is *not* held, since it waits
2149 	 * for our Rx handler to complete.
2150 	 * After it returns, no new Rx will start.
2151 	 */
2152 	mei_cldev_disable(cldev);
2153 
2154 	/*
2155 	 * The netdev was already removed, and the netdev's removal includes
2156 	 * a call to synchronize_net(), so we know there won't be any new Rx
2157 	 * that would trigger the following workers.
2158 	 */
2159 	cancel_work_sync(&mei->send_csa_msg_wk);
2160 	cancel_delayed_work_sync(&mei->csa_throttle_end_wk);
2161 	cancel_work_sync(&mei->netdev_work);
2162 	cancel_delayed_work_sync(&mei->ownership_dwork);
2163 
2164 	/*
2165 	 * If someone is waiting for ownership, let them know that we are
2166 	 * going down and that we are not connected anymore. They'll then
2167 	 * be able to take the device.
2168 	 */
2169 	wake_up_all(&mei->get_ownership_wq);
2170 	wake_up_all(&mei->pldr_wq);
2171 
2172 	mutex_lock(&iwl_mei_mutex);
2173 
2174 	iwl_mei_global_cldev = NULL;
2175 
2176 	wake_up_all(&mei->get_nvm_wq);
2177 
2178 	iwl_mei_free_shared_mem(cldev);
2179 
2180 	iwl_mei_dbgfs_unregister(mei);
2181 
2182 	mei_cldev_set_drvdata(cldev, NULL);
2183 
2184 	kfree(mei->nvm);
2185 
2186 	kfree(rcu_access_pointer(mei->filters));
2187 
2188 	devm_kfree(&cldev->dev, mei);
2189 
2190 	mutex_unlock(&iwl_mei_mutex);
2191 }
2192 
2193 static const struct mei_cl_device_id iwl_mei_tbl[] = {
2194 	{
2195 		.name = KBUILD_MODNAME,
2196 		.uuid = MEI_WLAN_UUID,
2197 		.version = MEI_CL_VERSION_ANY,
2198 	},
2199 
2200 	/* required last entry */
2201 	{ }
2202 };
2203 
2204 /*
2205  * Do not export the device table: this module is not auto-loaded by
2206  * the mei bus but pulled in as a dependency of iwlwifi.
2207  */
2208 
2209 static struct mei_cl_driver iwl_mei_cl_driver = {
2210 	.id_table = iwl_mei_tbl,
2211 	.name = KBUILD_MODNAME,
2212 	.probe = iwl_mei_probe,
2213 	.remove = iwl_mei_remove,
2214 };
2215 
2216 module_mei_cl_driver(iwl_mei_cl_driver);
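
/*
 * module_mei_cl_driver() is a convenience macro; the line above is roughly
 * equivalent to the following open-coded sketch:
 *
 *	static int __init iwl_mei_cl_driver_init(void)
 *	{
 *		return mei_cldev_driver_register(&iwl_mei_cl_driver);
 *	}
 *	module_init(iwl_mei_cl_driver_init);
 *
 *	static void __exit iwl_mei_cl_driver_exit(void)
 *	{
 *		mei_cldev_driver_unregister(&iwl_mei_cl_driver);
 *	}
 *	module_exit(iwl_mei_cl_driver_exit);
 */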
2217