// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/pci.h>

#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>
#include <linux/delay.h>

#include "mei_dev.h"
#include "hbm.h"

#include "hw-me.h"
#include "hw-me-regs.h"

#include "mei-trace.h"

/**
 * mei_me_reg_read - Reads 32bit data from the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset from which to read the data
 *
 * Return: register value (u32)
 */
static inline u32 mei_me_reg_read(const struct mei_me_hw *hw,
			       unsigned long offset)
{
	return ioread32(hw->mem_addr + offset);
}


/**
 * mei_me_reg_write - Writes 32bit data to the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset to which to write the data
 * @value: register value to write (u32)
 */
static inline void mei_me_reg_write(const struct mei_me_hw *hw,
				 unsigned long offset, u32 value)
{
	iowrite32(value, hw->mem_addr + offset);
}

/**
 * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer
 *  read window register
 *
 * @dev: the device structure
 *
 * Return: ME_CB_RW register value (u32)
 */
static inline u32 mei_me_mecbrw_read(const struct mei_device *dev)
{
	return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
}

/**
 * mei_me_hcbww_write - write 32bit data to the host circular buffer
 *
 * @dev: the device structure
 * @data: 32bit data to be written to the host circular buffer
 */
static inline void mei_me_hcbww_write(struct mei_device *dev, u32 data)
{
	mei_me_reg_write(to_me_hw(dev), H_CB_WW, data);
}

/**
 * mei_me_mecsr_read - Reads 32bit data from the ME CSR
 *
 * @dev: the device structure
 *
 * Return: ME_CSR_HA register value (u32)
 */
static inline u32 mei_me_mecsr_read(const struct mei_device *dev)
{
	u32 reg;

	reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA);
	trace_mei_reg_read(dev->dev, "ME_CSR_HA", ME_CSR_HA, reg);

	return reg;
}

/**
 * mei_hcsr_read - Reads 32bit data from the host CSR
 *
 * @dev: the device structure
 *
 * Return: H_CSR register value (u32)
 */
static inline u32 mei_hcsr_read(const struct mei_device *dev)
{
	u32 reg;

	reg = mei_me_reg_read(to_me_hw(dev), H_CSR);
	trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg);

	return reg;
}

/**
 * mei_hcsr_write - writes H_CSR register to the mei device
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
{
	trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg);
	mei_me_reg_write(to_me_hw(dev), H_CSR, reg);
}

/**
 * mei_hcsr_set - writes H_CSR register to the mei device,
 * and ignores the H_IS bit because it is write-one-to-clear.
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
{
	reg &= ~H_CSR_IS_MASK;
	mei_hcsr_write(dev, reg);
}
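
/*
 * A minimal sketch (editorial illustration, not driver logic): the bits in
 * H_CSR_IS_MASK are interrupt status bits that the hardware clears when
 * software writes 1 to them. A plain read-modify-write such as
 *
 *	hcsr = mei_hcsr_read(dev);
 *	hcsr |= H_IG;
 *	mei_hcsr_write(dev, hcsr);
 *
 * would write back any latched status bit and unintentionally acknowledge
 * a pending interrupt. mei_hcsr_set() therefore strips the status bits
 * first and is used for every H_CSR update that does not mean to ack.
 */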

/**
 * mei_hcsr_set_hig - set host interrupt (set H_IG)
 *
 * @dev: the device structure
 */
static inline void mei_hcsr_set_hig(struct mei_device *dev)
{
	u32 hcsr;

	hcsr = mei_hcsr_read(dev) | H_IG;
	mei_hcsr_set(dev, hcsr);
}

/**
 * mei_me_d0i3c_read - Reads 32bit data from the D0I3C register
 *
 * @dev: the device structure
 *
 * Return: H_D0I3C register value (u32)
 */
static inline u32 mei_me_d0i3c_read(const struct mei_device *dev)
{
	u32 reg;

	reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C);
	trace_mei_reg_read(dev->dev, "H_D0I3C", H_D0I3C, reg);

	return reg;
}

/**
 * mei_me_d0i3c_write - writes H_D0I3C register to device
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
{
	trace_mei_reg_write(dev->dev, "H_D0I3C", H_D0I3C, reg);
	mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg);
}

/**
 * mei_me_trc_status - read trc status register
 *
 * @dev: mei device
 * @trc: trc status register value
 *
 * Return: 0 on success, error otherwise
 */
static int mei_me_trc_status(struct mei_device *dev, u32 *trc)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (!hw->cfg->hw_trc_supported)
		return -EOPNOTSUPP;

	*trc = mei_me_reg_read(hw, ME_TRC);
	trace_mei_reg_read(dev->dev, "ME_TRC", ME_TRC, *trc);

	return 0;
}

/**
 * mei_me_fw_status - read fw status register from pci config space
 *
 * @dev: mei device
 * @fw_status: fw status register values
 *
 * Return: 0 on success, error otherwise
 */
static int mei_me_fw_status(struct mei_device *dev,
			    struct mei_fw_status *fw_status)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	const struct mei_fw_status *fw_src = &hw->cfg->fw_status;
	int ret;
	int i;

	if (!fw_status || !hw->read_fws)
		return -EINVAL;

	fw_status->count = fw_src->count;
	for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
		ret = hw->read_fws(dev, fw_src->status[i],
				   &fw_status->status[i]);
		trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_X",
				       fw_src->status[i],
				       fw_status->status[i]);
		if (ret)
			return ret;
	}

	return 0;
}
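
/*
 * A usage sketch (editorial illustration): callers go through the
 * mei_fw_status() wrapper, which dispatches to this handler via the hw ops
 * table registered in mei_me_hw_ops below, e.g.
 *
 *	struct mei_fw_status fw_status;
 *
 *	if (!mei_fw_status(dev, &fw_status))
 *		dev_dbg(dev->dev, "HFS_1 = 0x%08x\n", fw_status.status[0]);
 *
 * The number of valid entries in fw_status.status[] is fw_status.count,
 * taken from the per-platform MEI_CFG_*_HFS configuration.
 */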

/**
 * mei_me_hw_config - configure hw dependent settings
 *
 * @dev: mei device
 *
 * Return:
 *  * -EINVAL when read_fws is not set
 *  * 0 on success
 *
 */
static int mei_me_hw_config(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr, reg;

	if (WARN_ON(!hw->read_fws))
		return -EINVAL;

	/* Doesn't change in runtime */
	hcsr = mei_hcsr_read(dev);
	hw->hbuf_depth = (hcsr & H_CBD) >> 24;

	reg = 0;
	hw->read_fws(dev, PCI_CFG_HFS_1, &reg);
	trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
	hw->d0i3_supported =
		((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);

	hw->pg_state = MEI_PG_OFF;
	if (hw->d0i3_supported) {
		reg = mei_me_d0i3c_read(dev);
		if (reg & H_D0I3C_I3)
			hw->pg_state = MEI_PG_ON;
	}

	return 0;
}
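
/*
 * A worked example (editorial, register value hypothetical): H_CBD is the
 * circular buffer depth field that mei_me_hw_config() shifts down from
 * bits 31:24 of H_CSR. With hcsr == 0x08000244:
 *
 *	hbuf_depth = (hcsr & H_CBD) >> 24;	yields 8,
 *
 * i.e. the host circular buffer is 8 slots (8 * 4 = 32 bytes) deep.
 */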

/**
 * mei_me_pg_state - translate internal pg state
 *   to the mei power gating state
 *
 * @dev:  mei device
 *
 * Return: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise
 */
static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	return hw->pg_state;
}

static inline u32 me_intr_src(u32 hcsr)
{
	return hcsr & H_CSR_IS_MASK;
}

/**
 * me_intr_disable - disables mei device interrupts
 *      using supplied hcsr register value.
 *
 * @dev: the device structure
 * @hcsr: supplied hcsr register value
 */
static inline void me_intr_disable(struct mei_device *dev, u32 hcsr)
{
	hcsr &= ~H_CSR_IE_MASK;
	mei_hcsr_set(dev, hcsr);
}

/**
 * me_intr_clear - clear and stop interrupts
 *
 * @dev: the device structure
 * @hcsr: supplied hcsr register value
 */
static inline void me_intr_clear(struct mei_device *dev, u32 hcsr)
{
	if (me_intr_src(hcsr))
		mei_hcsr_write(dev, hcsr);
}

/**
 * mei_me_intr_clear - clear and stop interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_clear(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	me_intr_clear(dev, hcsr);
}
/**
 * mei_me_intr_enable - enables mei device interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_enable(struct mei_device *dev)
{
	u32 hcsr;

	if (mei_me_hw_use_polling(to_me_hw(dev)))
		return;

	hcsr = mei_hcsr_read(dev) | H_CSR_IE_MASK;
	mei_hcsr_set(dev, hcsr);
}

/**
 * mei_me_intr_disable - disables mei device interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_disable(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	me_intr_disable(dev, hcsr);
}

/**
 * mei_me_synchronize_irq - wait for pending IRQ handlers
 *
 * @dev: the device structure
 */
static void mei_me_synchronize_irq(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (mei_me_hw_use_polling(hw))
		return;

	synchronize_irq(hw->irq);
}

/**
 * mei_me_hw_reset_release - release device from the reset
 *
 * @dev: the device structure
 */
static void mei_me_hw_reset_release(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	hcsr |= H_IG;
	hcsr &= ~H_RST;
	mei_hcsr_set(dev, hcsr);
}

/**
 * mei_me_host_set_ready - enable device
 *
 * @dev: mei device
 */
static void mei_me_host_set_ready(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	if (!mei_me_hw_use_polling(to_me_hw(dev)))
		hcsr |= H_CSR_IE_MASK;

	hcsr |= H_IG | H_RDY;
	mei_hcsr_set(dev, hcsr);
}

/**
 * mei_me_host_is_ready - check whether the host has turned ready
 *
 * @dev: mei device
 * Return: bool
 */
static bool mei_me_host_is_ready(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	return (hcsr & H_RDY) == H_RDY;
}

/**
 * mei_me_hw_is_ready - check whether the me(hw) has turned ready
 *
 * @dev: mei device
 * Return: bool
 */
static bool mei_me_hw_is_ready(struct mei_device *dev)
{
	u32 mecsr = mei_me_mecsr_read(dev);

	return (mecsr & ME_RDY_HRA) == ME_RDY_HRA;
}

/**
 * mei_me_hw_is_resetting - check whether the me(hw) is in reset
 *
 * @dev: mei device
 * Return: bool
 */
static bool mei_me_hw_is_resetting(struct mei_device *dev)
{
	u32 mecsr = mei_me_mecsr_read(dev);

	return (mecsr & ME_RST_HRA) == ME_RST_HRA;
}

/**
 * mei_gsc_pxp_check - check for gsc firmware entering pxp mode
 *
 * @dev: the device structure
 */
static void mei_gsc_pxp_check(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 fwsts5 = 0;

	if (!kind_is_gsc(dev) && !kind_is_gscfi(dev))
		return;

	hw->read_fws(dev, PCI_CFG_HFS_5, &fwsts5);
	trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_5", PCI_CFG_HFS_5, fwsts5);

	if ((fwsts5 & GSC_CFG_HFS_5_BOOT_TYPE_MSK) == GSC_CFG_HFS_5_BOOT_TYPE_PXP) {
		if (dev->gsc_reset_to_pxp == MEI_DEV_RESET_TO_PXP_DEFAULT)
			dev->gsc_reset_to_pxp = MEI_DEV_RESET_TO_PXP_PERFORMED;
	} else {
		dev->gsc_reset_to_pxp = MEI_DEV_RESET_TO_PXP_DEFAULT;
	}

	if (dev->pxp_mode == MEI_DEV_PXP_DEFAULT)
		return;

	if ((fwsts5 & GSC_CFG_HFS_5_BOOT_TYPE_MSK) == GSC_CFG_HFS_5_BOOT_TYPE_PXP) {
		dev_dbg(dev->dev, "pxp mode is ready 0x%08x\n", fwsts5);
		dev->pxp_mode = MEI_DEV_PXP_READY;
	} else {
		dev_dbg(dev->dev, "pxp mode is not ready 0x%08x\n", fwsts5);
	}
}

/**
 * mei_me_hw_ready_wait - wait until the me(hw) has turned ready
 *  or timeout is reached
 *
 * @dev: mei device
 * Return: 0 on success, error otherwise
 */
static int mei_me_hw_ready_wait(struct mei_device *dev)
{
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_hw_ready,
			dev->recvd_hw_ready,
			dev->timeouts.hw_ready);
	mutex_lock(&dev->device_lock);
	if (!dev->recvd_hw_ready) {
		dev_err(dev->dev, "wait hw ready failed\n");
		return -ETIME;
	}

	mei_gsc_pxp_check(dev);

	mei_me_hw_reset_release(dev);
	dev->recvd_hw_ready = false;
	return 0;
}

/**
 * mei_me_check_fw_reset - check for the firmware reset error and exception conditions
 *
 * @dev: mei device
 */
static void mei_me_check_fw_reset(struct mei_device *dev)
{
	struct mei_fw_status fw_status;
	char fw_sts_str[MEI_FW_STATUS_STR_SZ] = {0};
	int ret;
	u32 fw_pm_event = 0;

	if (!dev->saved_fw_status_flag)
		goto end;

	if (dev->gsc_reset_to_pxp == MEI_DEV_RESET_TO_PXP_PERFORMED) {
		ret = mei_fw_status(dev, &fw_status);
		if (!ret) {
			fw_pm_event = fw_status.status[1] & PCI_CFG_HFS_2_PM_EVENT_MASK;
			if (fw_pm_event != PCI_CFG_HFS_2_PM_CMOFF_TO_CMX_ERROR &&
			    fw_pm_event != PCI_CFG_HFS_2_PM_CM_RESET_ERROR)
				goto end;
		} else {
			dev_err(dev->dev, "failed to read firmware status: %d\n", ret);
		}
	}

	mei_fw_status2str(&dev->saved_fw_status, fw_sts_str, sizeof(fw_sts_str));
	dev_warn(dev->dev, "unexpected reset: fw_pm_event = 0x%x, dev_state = %u fw status = %s\n",
		 fw_pm_event, dev->saved_dev_state, fw_sts_str);

end:
	if (dev->gsc_reset_to_pxp == MEI_DEV_RESET_TO_PXP_PERFORMED)
		dev->gsc_reset_to_pxp = MEI_DEV_RESET_TO_PXP_DONE;
	dev->saved_fw_status_flag = false;
}

/**
 * mei_me_hw_start - hw start routine
 *
 * @dev: mei device
 * Return: 0 on success, error otherwise
 */
static int mei_me_hw_start(struct mei_device *dev)
{
	int ret = mei_me_hw_ready_wait(dev);

	if (kind_is_gsc(dev) || kind_is_gscfi(dev))
		mei_me_check_fw_reset(dev);
	if (ret)
		return ret;
	dev_dbg(dev->dev, "hw is ready\n");

	mei_me_host_set_ready(dev);
	return ret;
}


/**
 * mei_hbuf_filled_slots - gets number of device filled buffer slots
 *
 * @dev: the device structure
 *
 * Return: number of filled slots
 */
static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
{
	u32 hcsr;
	char read_ptr, write_ptr;

	hcsr = mei_hcsr_read(dev);

	read_ptr = (char) ((hcsr & H_CBRP) >> 8);
	write_ptr = (char) ((hcsr & H_CBWP) >> 16);

	return (unsigned char) (write_ptr - read_ptr);
}
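
/*
 * A worked example (editorial, pointer values hypothetical): the read and
 * write pointers are free-running 8-bit counters, so the subtraction in
 * mei_hbuf_filled_slots() is modulo 256 and stays correct across
 * wraparound. With write_ptr == 0x02 and read_ptr == 0xfe:
 *
 *	(unsigned char)(0x02 - 0xfe) == 4
 *
 * i.e. four slots are filled even though the write pointer has already
 * wrapped past the read pointer.
 */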

/**
 * mei_me_hbuf_is_empty - checks if host buffer is empty.
 *
 * @dev: the device structure
 *
 * Return: true if empty, false otherwise.
 */
static bool mei_me_hbuf_is_empty(struct mei_device *dev)
{
	return mei_hbuf_filled_slots(dev) == 0;
}

/**
 * mei_me_hbuf_empty_slots - counts write empty slots.
 *
 * @dev: the device structure
 *
 * Return: -EOVERFLOW if overflow, otherwise empty slots count
 */
static int mei_me_hbuf_empty_slots(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned char filled_slots, empty_slots;

	filled_slots = mei_hbuf_filled_slots(dev);
	empty_slots = hw->hbuf_depth - filled_slots;

	/* check for overflow */
	if (filled_slots > hw->hbuf_depth)
		return -EOVERFLOW;

	return empty_slots;
}

/**
 * mei_me_hbuf_depth - returns depth of the hw buffer.
 *
 * @dev: the device structure
 *
 * Return: size of hw buffer in slots
 */
static u32 mei_me_hbuf_depth(const struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	return hw->hbuf_depth;
}

/**
 * mei_me_hbuf_write - writes a message to host hw buffer.
 *
 * @dev: the device structure
 * @hdr: header of message
 * @hdr_len: header length in bytes: must be a multiple of the slot size (4 bytes)
 * @data: payload
 * @data_len: payload length in bytes
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int mei_me_hbuf_write(struct mei_device *dev,
			     const void *hdr, size_t hdr_len,
			     const void *data, size_t data_len)
{
	unsigned long rem;
	unsigned long i;
	const u32 *reg_buf;
	u32 dw_cnt;
	int empty_slots;

	if (WARN_ON(!hdr || hdr_len & 0x3))
		return -EINVAL;

	if (!data && data_len) {
		dev_err(dev->dev, "wrong parameters null data with data_len = %zu\n", data_len);
		return -EINVAL;
	}

	dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));

	empty_slots = mei_hbuf_empty_slots(dev);
	dev_dbg(dev->dev, "empty slots = %d.\n", empty_slots);

	if (empty_slots < 0)
		return -EOVERFLOW;

	dw_cnt = mei_data2slots(hdr_len + data_len);
	if (dw_cnt > (u32)empty_slots)
		return -EMSGSIZE;

	reg_buf = hdr;
	for (i = 0; i < hdr_len / MEI_SLOT_SIZE; i++)
		mei_me_hcbww_write(dev, reg_buf[i]);

	reg_buf = data;
	for (i = 0; i < data_len / MEI_SLOT_SIZE; i++)
		mei_me_hcbww_write(dev, reg_buf[i]);

	rem = data_len & 0x3;
	if (rem > 0) {
		u32 reg = 0;

		memcpy(&reg, (const u8 *)data + data_len - rem, rem);
		mei_me_hcbww_write(dev, reg);
	}

	mei_hcsr_set_hig(dev);
	if (!mei_me_hw_is_ready(dev))
		return -EIO;

	return 0;
}
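
/*
 * A worked example (editorial, sizes hypothetical): slot accounting is
 * done in 4-byte dwords, rounding the payload up. For a 4-byte header and
 * a 6-byte payload:
 *
 *	dw_cnt = mei_data2slots(4 + 6);	yields 3 slots
 *
 * mei_me_hbuf_write() then pushes one full header dword, one full payload
 * dword, and a final dword whose upper two bytes are zero-padded by the
 * memcpy() into the temporary reg variable.
 */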

/**
 * mei_me_count_full_read_slots - counts read full slots.
 *
 * @dev: the device structure
 *
 * Return: -EOVERFLOW if overflow, otherwise filled slots count
 */
static int mei_me_count_full_read_slots(struct mei_device *dev)
{
	u32 me_csr;
	char read_ptr, write_ptr;
	unsigned char buffer_depth, filled_slots;

	me_csr = mei_me_mecsr_read(dev);
	buffer_depth = (unsigned char)((me_csr & ME_CBD_HRA) >> 24);
	read_ptr = (char) ((me_csr & ME_CBRP_HRA) >> 8);
	write_ptr = (char) ((me_csr & ME_CBWP_HRA) >> 16);
	filled_slots = (unsigned char) (write_ptr - read_ptr);

	/* check for overflow */
	if (filled_slots > buffer_depth)
		return -EOVERFLOW;

	dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots);
	return (int)filled_slots;
}

/**
 * mei_me_read_slots - reads a message from mei device.
 *
 * @dev: the device structure
 * @buffer: message buffer to be filled
 * @buffer_length: length of the message to read, in bytes
 *
 * Return: always 0
 */
static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
			     unsigned long buffer_length)
{
	u32 *reg_buf = (u32 *)buffer;

	for (; buffer_length >= MEI_SLOT_SIZE; buffer_length -= MEI_SLOT_SIZE)
		*reg_buf++ = mei_me_mecbrw_read(dev);

	if (buffer_length > 0) {
		u32 reg = mei_me_mecbrw_read(dev);

		memcpy(reg_buf, &reg, buffer_length);
	}

	mei_hcsr_set_hig(dev);
	return 0;
}

/**
 * mei_me_pg_set - write pg enter register
 *
 * @dev: the device structure
 */
static void mei_me_pg_set(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg;

	reg = mei_me_reg_read(hw, H_HPG_CSR);
	trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);

	reg |= H_HPG_CSR_PGI;

	trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
	mei_me_reg_write(hw, H_HPG_CSR, reg);
}

/**
 * mei_me_pg_unset - write pg exit register
 *
 * @dev: the device structure
 */
static void mei_me_pg_unset(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg;

	reg = mei_me_reg_read(hw, H_HPG_CSR);
	trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);

	WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");

	reg |= H_HPG_CSR_PGIHEXR;

	trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
	mei_me_reg_write(hw, H_HPG_CSR, reg);
}

/**
 * mei_me_pg_legacy_enter_sync - perform legacy pg entry procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */
static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	int ret;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
	if (ret)
		return ret;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED,
		dev->timeouts.pgi);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
		mei_me_pg_set(dev);
		ret = 0;
	} else {
		ret = -ETIME;
	}

	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_ON;

	return ret;
}

/**
 * mei_me_pg_legacy_exit_sync - perform legacy pg exit procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */
static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	int ret;

	if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
		goto reply;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	mei_me_pg_unset(dev);

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED,
		dev->timeouts.pgi);
	mutex_lock(&dev->device_lock);

reply:
	if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		ret = -ETIME;
		goto out;
	}

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
	if (ret)
		return ret;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
		dev->timeouts.pgi);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
		ret = 0;
	else
		ret = -ETIME;

out:
	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_OFF;

	return ret;
}

/**
 * mei_me_pg_in_transition - is device now in pg transition
 *
 * @dev: the device structure
 *
 * Return: true if in pg transition, false otherwise
 */
static bool mei_me_pg_in_transition(struct mei_device *dev)
{
	return dev->pg_event >= MEI_PG_EVENT_WAIT &&
	       dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
}
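
/*
 * Editorial note: the range check above assumes the declaration order of
 * enum mei_pg_event in mei_dev.h keeps the transitional states (WAIT,
 * RECEIVED, INTR_WAIT) contiguous between the idle and final states; the
 * two comparisons are a compact "in any handshake phase" test rather than
 * three equality checks.
 */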

/**
 * mei_me_pg_is_enabled - detect if PG is supported by HW
 *
 * @dev: the device structure
 *
 * Return: true if pg is supported, false otherwise
 */
static bool mei_me_pg_is_enabled(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg = mei_me_mecsr_read(dev);

	if (hw->d0i3_supported)
		return true;

	if ((reg & ME_PGIC_HRA) == 0)
		goto notsupported;

	if (!dev->hbm_f_pg_supported)
		goto notsupported;

	return true;

notsupported:
	dev_dbg(dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n",
		hw->d0i3_supported,
		!!(reg & ME_PGIC_HRA),
		dev->version.major_version,
		dev->version.minor_version,
		HBM_MAJOR_VERSION_PGI,
		HBM_MINOR_VERSION_PGI);

	return false;
}

/**
 * mei_me_d0i3_set - write d0i3 register bit on mei device.
 *
 * @dev: the device structure
 * @intr: ask for interrupt
 *
 * Return: D0I3C register value
 */
static u32 mei_me_d0i3_set(struct mei_device *dev, bool intr)
{
	u32 reg = mei_me_d0i3c_read(dev);

	reg |= H_D0I3C_I3;
	if (intr)
		reg |= H_D0I3C_IR;
	else
		reg &= ~H_D0I3C_IR;
	mei_me_d0i3c_write(dev, reg);
	/* read it to ensure HW consistency */
	reg = mei_me_d0i3c_read(dev);
	return reg;
}

/**
 * mei_me_d0i3_unset - clear d0i3 register bit on mei device.
 *
 * @dev: the device structure
 *
 * Return: D0I3C register value
 */
static u32 mei_me_d0i3_unset(struct mei_device *dev)
{
	u32 reg = mei_me_d0i3c_read(dev);

	reg &= ~H_D0I3C_I3;
	reg |= H_D0I3C_IR;
	mei_me_d0i3c_write(dev, reg);
	/* read it to ensure HW consistency */
	reg = mei_me_d0i3c_read(dev);
	return reg;
}

/**
 * mei_me_d0i3_enter_sync - perform d0i3 entry procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */
static int mei_me_d0i3_enter_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	int ret;
	u32 reg;

	reg = mei_me_d0i3c_read(dev);
	if (reg & H_D0I3C_I3) {
		/* we are in d0i3, nothing to do */
		dev_dbg(dev->dev, "d0i3 set not needed\n");
		ret = 0;
		goto on;
	}

	/* PGI entry procedure */
	dev->pg_event = MEI_PG_EVENT_WAIT;

	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
	if (ret)
		/* FIXME: should we reset here? */
		goto out;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED,
		dev->timeouts.pgi);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		ret = -ETIME;
		goto out;
	}
	/* end PGI entry procedure */

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

	reg = mei_me_d0i3_set(dev, true);
	if (!(reg & H_D0I3C_CIP)) {
		dev_dbg(dev->dev, "d0i3 enter wait not needed\n");
		ret = 0;
		goto on;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
		dev->timeouts.d0i3);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
		reg = mei_me_d0i3c_read(dev);
		if (!(reg & H_D0I3C_I3)) {
			ret = -ETIME;
			goto out;
		}
	}

	ret = 0;
on:
	hw->pg_state = MEI_PG_ON;
out:
	dev->pg_event = MEI_PG_EVENT_IDLE;
	dev_dbg(dev->dev, "d0i3 enter ret = %d\n", ret);
	return ret;
}

/**
 * mei_me_d0i3_enter - perform d0i3 entry procedure
 *   no hbm PG handshake
 *   no waiting for confirmation; runs with interrupts
 *   disabled
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */
static int mei_me_d0i3_enter(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg;

	reg = mei_me_d0i3c_read(dev);
	if (reg & H_D0I3C_I3) {
		/* we are in d0i3, nothing to do */
		dev_dbg(dev->dev, "already d0i3 : set not needed\n");
		goto on;
	}

	mei_me_d0i3_set(dev, false);
on:
	hw->pg_state = MEI_PG_ON;
	dev->pg_event = MEI_PG_EVENT_IDLE;
	dev_dbg(dev->dev, "d0i3 enter\n");
	return 0;
}

/**
 * mei_me_d0i3_exit_sync - perform d0i3 exit procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */
static int mei_me_d0i3_exit_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	int ret;
	u32 reg;

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

	reg = mei_me_d0i3c_read(dev);
	if (!(reg & H_D0I3C_I3)) {
		/* we are not in d0i3, nothing to do */
		dev_dbg(dev->dev, "d0i3 exit not needed\n");
		ret = 0;
		goto off;
	}

	reg = mei_me_d0i3_unset(dev);
	if (!(reg & H_D0I3C_CIP)) {
		dev_dbg(dev->dev, "d0i3 exit wait not needed\n");
		ret = 0;
		goto off;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
		dev->timeouts.d0i3);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
		reg = mei_me_d0i3c_read(dev);
		if (reg & H_D0I3C_I3) {
			ret = -ETIME;
			goto out;
		}
	}

	ret = 0;
off:
	hw->pg_state = MEI_PG_OFF;
out:
	dev->pg_event = MEI_PG_EVENT_IDLE;

	dev_dbg(dev->dev, "d0i3 exit ret = %d\n", ret);
	return ret;
}

/**
 * mei_me_pg_legacy_intr - perform legacy pg processing
 *			   in interrupt thread handler
 *
 * @dev: the device structure
 */
static void mei_me_pg_legacy_intr(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (dev->pg_event != MEI_PG_EVENT_INTR_WAIT)
		return;

	dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
	hw->pg_state = MEI_PG_OFF;
	if (waitqueue_active(&dev->wait_pg))
		wake_up(&dev->wait_pg);
}

/**
 * mei_me_d0i3_intr - perform d0i3 processing in interrupt thread handler
 *
 * @dev: the device structure
 * @intr_source: interrupt source
 */
static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT &&
	    (intr_source & H_D0I3C_IS)) {
		dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
		if (hw->pg_state == MEI_PG_ON) {
			hw->pg_state = MEI_PG_OFF;
			if (dev->hbm_state != MEI_HBM_IDLE) {
				/*
				 * force H_RDY because it could be
				 * wiped off during PG
				 */
				dev_dbg(dev->dev, "d0i3 set host ready\n");
				mei_me_host_set_ready(dev);
			}
		} else {
			hw->pg_state = MEI_PG_ON;
		}

		wake_up(&dev->wait_pg);
	}

	if (hw->pg_state == MEI_PG_ON && (intr_source & H_IS)) {
		/*
		 * HW sent some data and we are in D0i3, so
		 * we got here because of HW initiated exit from D0i3.
		 * Start runtime pm resume sequence to exit low power state.
		 */
		dev_dbg(dev->dev, "d0i3 want resume\n");
		mei_hbm_pg_resume(dev);
	}
}

/**
 * mei_me_pg_intr - perform pg processing in interrupt thread handler
 *
 * @dev: the device structure
 * @intr_source: interrupt source
 */
static void mei_me_pg_intr(struct mei_device *dev, u32 intr_source)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (hw->d0i3_supported)
		mei_me_d0i3_intr(dev, intr_source);
	else
		mei_me_pg_legacy_intr(dev);
}

/**
 * mei_me_pg_enter_sync - perform runtime pm entry procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */
int mei_me_pg_enter_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (hw->d0i3_supported)
		return mei_me_d0i3_enter_sync(dev);
	else
		return mei_me_pg_legacy_enter_sync(dev);
}

/**
 * mei_me_pg_exit_sync - perform runtime pm exit procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */
int mei_me_pg_exit_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (hw->d0i3_supported)
		return mei_me_d0i3_exit_sync(dev);
	else
		return mei_me_pg_legacy_exit_sync(dev);
}

/**
 * mei_me_hw_reset - resets fw via mei csr register.
 *
 * @dev: the device structure
 * @intr_enable: if interrupt should be enabled after reset.
 *
 * Return: 0 on success, an error code otherwise
 */
static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	int ret;
	u32 hcsr;

	if (intr_enable) {
		mei_me_intr_enable(dev);
		if (hw->d0i3_supported) {
			ret = mei_me_d0i3_exit_sync(dev);
			if (ret)
				return ret;
		} else {
			hw->pg_state = MEI_PG_OFF;
		}
	}

	pm_runtime_set_active(dev->dev);

	hcsr = mei_hcsr_read(dev);
	/* H_RST may be found lit before reset is started,
	 * for example if preceding reset flow hasn't completed.
	 * In that case asserting H_RST will be ignored, therefore
	 * we need to clean H_RST bit to start a successful reset sequence.
	 */
	if ((hcsr & H_RST) == H_RST) {
		dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
		hcsr &= ~H_RST;
		mei_hcsr_set(dev, hcsr);
		hcsr = mei_hcsr_read(dev);
	}

	hcsr |= H_RST | H_IG | H_CSR_IS_MASK;

	if (!intr_enable || mei_me_hw_use_polling(to_me_hw(dev)))
		hcsr &= ~H_CSR_IE_MASK;

	dev->recvd_hw_ready = false;
	mei_hcsr_write(dev, hcsr);

	/*
	 * Host reads the H_CSR once to ensure that the
	 * posted write to H_CSR completes.
	 */
	hcsr = mei_hcsr_read(dev);

	if ((hcsr & H_RST) == 0)
		dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);

	if ((hcsr & H_RDY) == H_RDY)
		dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);

	if (!intr_enable) {
		mei_me_hw_reset_release(dev);
		if (hw->d0i3_supported) {
			ret = mei_me_d0i3_enter(dev);
			if (ret)
				return ret;
		}
	}
	return 0;
}
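
/*
 * Reset sequence summary (editorial, derived from mei_me_hw_reset() above):
 *
 *	1. optionally re-enable interrupts and leave D0i3/PG
 *	2. clear a stale H_RST left over from an unfinished reset flow
 *	3. write H_RST | H_IG with all interrupt status bits set, so the
 *	   same write also acks anything pending
 *	4. read H_CSR back to flush the posted MMIO write
 *	5. when running without interrupts, release the reset immediately
 *	   and, if supported, drop back into D0i3
 */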

/**
 * mei_me_irq_quick_handler - The ISR of the MEI device
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Return: irqreturn_t
 */
irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *)dev_id;
	u32 hcsr;

	hcsr = mei_hcsr_read(dev);
	if (!me_intr_src(hcsr))
		return IRQ_NONE;

	dev_dbg(dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr));

	/* disable interrupts on device */
	me_intr_disable(dev, hcsr);
	return IRQ_WAKE_THREAD;
}
EXPORT_SYMBOL_GPL(mei_me_irq_quick_handler);

/**
 * mei_me_irq_thread_handler - function called after ISR to handle the interrupt
 * processing.
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Return: irqreturn_t
 *
 */
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct list_head cmpl_list;
	s32 slots;
	u32 hcsr;
	int rets = 0;

	dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n");
	/* initialize our complete list */
	mutex_lock(&dev->device_lock);

	hcsr = mei_hcsr_read(dev);
	me_intr_clear(dev, hcsr);

	INIT_LIST_HEAD(&cmpl_list);

	/* check if ME wants a reset */
	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
		if (kind_is_gsc(dev) || kind_is_gscfi(dev)) {
			dev_dbg(dev->dev, "FW not ready: resetting: dev_state = %d\n",
				dev->dev_state);
		} else {
			dev_warn(dev->dev, "FW not ready: resetting: dev_state = %d\n",
				 dev->dev_state);
		}
		if (dev->dev_state == MEI_DEV_POWERING_DOWN ||
		    dev->dev_state == MEI_DEV_POWER_DOWN)
			mei_cl_all_disconnect(dev);
		else if (dev->dev_state != MEI_DEV_DISABLED)
			schedule_work(&dev->reset_work);
		goto end;
	}

	if (mei_me_hw_is_resetting(dev))
		mei_hcsr_set_hig(dev);

	mei_me_pg_intr(dev, me_intr_src(hcsr));

	/*  check if we need to start the dev */
	if (!mei_host_is_ready(dev)) {
		if (mei_hw_is_ready(dev)) {
			dev_dbg(dev->dev, "we need to start the dev.\n");
			dev->recvd_hw_ready = true;
			wake_up(&dev->wait_hw_ready);
		} else {
			dev_dbg(dev->dev, "Spurious Interrupt\n");
		}
		goto end;
	}
	/* check slots available for reading */
	slots = mei_count_full_read_slots(dev);
	while (slots > 0) {
		dev_dbg(dev->dev, "slots to read = %08x\n", slots);
		rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
		/* There is a race between ME write and interrupt delivery:
		 * Not all data is always available immediately after the
		 * interrupt, so try to read again on the next interrupt.
		 */
		if (rets == -ENODATA)
			break;

		if (rets) {
			dev_err(dev->dev, "mei_irq_read_handler ret = %d, state = %d.\n",
				rets, dev->dev_state);
			if (dev->dev_state != MEI_DEV_RESETTING &&
			    dev->dev_state != MEI_DEV_DISABLED &&
			    dev->dev_state != MEI_DEV_POWERING_DOWN &&
			    dev->dev_state != MEI_DEV_POWER_DOWN)
				schedule_work(&dev->reset_work);
			goto end;
		}
	}

	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);

	/*
	 * During PG handshake the only allowed write is the reply to the
	 * PG exit message, so block calling the write function
	 * if the pg event is in PG handshake
	 */
	if (dev->pg_event != MEI_PG_EVENT_WAIT &&
	    dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		rets = mei_irq_write_handler(dev, &cmpl_list);
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
	}

	mei_irq_compl_handler(dev, &cmpl_list);

end:
	dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
	mei_me_intr_enable(dev);
	mutex_unlock(&dev->device_lock);
	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mei_me_irq_thread_handler);

#define MEI_POLLING_TIMEOUT_ACTIVE 100
#define MEI_POLLING_TIMEOUT_IDLE   500

/**
 * mei_me_polling_thread - interrupt register polling thread
 *
 * @_dev: mei device
 *
 * The thread monitors the interrupt source register and calls
 * mei_me_irq_thread_handler() to handle the firmware
 * input.
 *
 * The thread polls at the MEI_POLLING_TIMEOUT_ACTIVE interval while
 * events are arriving; when idle, the polling interval is increased
 * by MEI_POLLING_TIMEOUT_ACTIVE per iteration, up to
 * MEI_POLLING_TIMEOUT_IDLE.
 *
 * Return: always 0
 */
int mei_me_polling_thread(void *_dev)
{
	struct mei_device *dev = _dev;
	irqreturn_t irq_ret;
	long polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE;

	dev_dbg(dev->dev, "kernel thread is running\n");
	while (!kthread_should_stop()) {
		struct mei_me_hw *hw = to_me_hw(dev);
		u32 hcsr;

		wait_event_timeout(hw->wait_active,
				   hw->is_active || kthread_should_stop(),
				   msecs_to_jiffies(MEI_POLLING_TIMEOUT_IDLE));

		if (kthread_should_stop())
			break;

		hcsr = mei_hcsr_read(dev);
		if (me_intr_src(hcsr)) {
			polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE;
			irq_ret = mei_me_irq_thread_handler(1, dev);
			if (irq_ret != IRQ_HANDLED)
				dev_err(dev->dev, "irq_ret %d\n", irq_ret);
		} else {
			/*
			 * Increase timeout by MEI_POLLING_TIMEOUT_ACTIVE
			 * up to MEI_POLLING_TIMEOUT_IDLE
			 */
			polling_timeout = clamp_val(polling_timeout + MEI_POLLING_TIMEOUT_ACTIVE,
						    MEI_POLLING_TIMEOUT_ACTIVE,
						    MEI_POLLING_TIMEOUT_IDLE);
		}

		schedule_timeout_interruptible(msecs_to_jiffies(polling_timeout));
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mei_me_polling_thread);
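
/*
 * A worked example (editorial): with no pending interrupt source, the
 * polling interval grows as
 *
 *	100 -> 200 -> 300 -> 400 -> 500 -> 500 -> ... (milliseconds)
 *
 * and snaps back to 100 ms as soon as me_intr_src() reports a pending
 * event, so an active firmware is serviced at the fast rate while an
 * idle device is checked only about every 500 ms.
 */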

static const struct mei_hw_ops mei_me_hw_ops = {

	.trc_status = mei_me_trc_status,
	.fw_status = mei_me_fw_status,
	.pg_state  = mei_me_pg_state,

	.host_is_ready = mei_me_host_is_ready,

	.hw_is_ready = mei_me_hw_is_ready,
	.hw_reset = mei_me_hw_reset,
	.hw_config = mei_me_hw_config,
	.hw_start = mei_me_hw_start,

	.pg_in_transition = mei_me_pg_in_transition,
	.pg_is_enabled = mei_me_pg_is_enabled,

	.intr_clear = mei_me_intr_clear,
	.intr_enable = mei_me_intr_enable,
	.intr_disable = mei_me_intr_disable,
	.synchronize_irq = mei_me_synchronize_irq,

	.hbuf_free_slots = mei_me_hbuf_empty_slots,
	.hbuf_is_ready = mei_me_hbuf_is_empty,
	.hbuf_depth = mei_me_hbuf_depth,

	.write = mei_me_hbuf_write,

	.rdbuf_full_slots = mei_me_count_full_read_slots,
	.read_hdr = mei_me_mecbrw_read,
	.read = mei_me_read_slots
};

/**
 * mei_me_fw_type_nm() - check for nm sku
 *
 * @pdev: pci device
 *
 * Read ME FW Status register to check for the Node Manager (NM) Firmware.
 * The NM FW is only signaled in PCI function 0.
 * __Note__: Deprecated by PCH8 and newer.
 *
 * Return: true in case of NM firmware
 */
static bool mei_me_fw_type_nm(const struct pci_dev *pdev)
{
	u32 reg;
	unsigned int devfn;

	devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
	pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_2, &reg);
	trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg);
	/* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
	return (reg & 0x600) == 0x200;
}

#define MEI_CFG_FW_NM                           \
	.quirk_probe = mei_me_fw_type_nm

/**
 * mei_me_fw_type_sps_4() - check for sps 4.0 sku
 *
 * @pdev: pci device
 *
 * Read ME FW Status register to check for SPS Firmware.
 * The SPS FW is only signaled in the PCI function 0.
 * __Note__: Deprecated by SPS 5.0 and newer.
 *
 * Return: true in case of SPS firmware
 */
static bool mei_me_fw_type_sps_4(const struct pci_dev *pdev)
{
	u32 reg;
	unsigned int devfn;

	devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
	pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, &reg);
	trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
	return (reg & PCI_CFG_HFS_1_OPMODE_MSK) == PCI_CFG_HFS_1_OPMODE_SPS;
}

#define MEI_CFG_FW_SPS_4                          \
	.quirk_probe = mei_me_fw_type_sps_4

/**
 * mei_me_fw_type_sps_ign() - check for sps or ign sku
 *
 * @pdev: pci device
 *
 * Read ME FW Status register to check for SPS or IGN Firmware.
 * The SPS/IGN FW is only signaled in pci function 0
 *
 * Return: true in case of SPS/IGN firmware
 */
static bool mei_me_fw_type_sps_ign(const struct pci_dev *pdev)
{
	u32 reg;
	u32 fw_type;
	unsigned int devfn;

	devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
	pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_3, &reg);
	trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_3", PCI_CFG_HFS_3, reg);
	fw_type = (reg & PCI_CFG_HFS_3_FW_SKU_MSK);

	dev_dbg(&pdev->dev, "fw type is %d\n", fw_type);

	return fw_type == PCI_CFG_HFS_3_FW_SKU_IGN ||
	       fw_type == PCI_CFG_HFS_3_FW_SKU_SPS;
}

#define MEI_CFG_KIND_ITOUCH                     \
	.kind = "itouch"

#define MEI_CFG_TYPE_GSC                        \
	.kind = "gsc"

#define MEI_CFG_TYPE_GSCFI                      \
	.kind = "gscfi"

#define MEI_CFG_FW_SPS_IGN                      \
	.quirk_probe = mei_me_fw_type_sps_ign

#define MEI_CFG_FW_VER_SUPP                     \
	.fw_ver_supported = 1

#define MEI_CFG_ICH_HFS                      \
	.fw_status.count = 0

#define MEI_CFG_ICH10_HFS                        \
	.fw_status.count = 1,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1

#define MEI_CFG_PCH_HFS                         \
	.fw_status.count = 2,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1,   \
	.fw_status.status[1] = PCI_CFG_HFS_2

#define MEI_CFG_PCH8_HFS                        \
	.fw_status.count = 6,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1,   \
	.fw_status.status[1] = PCI_CFG_HFS_2,   \
	.fw_status.status[2] = PCI_CFG_HFS_3,   \
	.fw_status.status[3] = PCI_CFG_HFS_4,   \
	.fw_status.status[4] = PCI_CFG_HFS_5,   \
	.fw_status.status[5] = PCI_CFG_HFS_6

#define MEI_CFG_DMA_128 \
	.dma_size[DMA_DSCR_HOST] = SZ_128K, \
	.dma_size[DMA_DSCR_DEVICE] = SZ_128K, \
	.dma_size[DMA_DSCR_CTRL] = PAGE_SIZE

#define MEI_CFG_TRC \
	.hw_trc_supported = 1

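/*
 * A minimal sketch (editorial) of how the MEI_CFG_* macros compose: each
 * macro expands to designated initializers, so e.g.
 *
 *	static const struct mei_cfg example_cfg = {
 *		MEI_CFG_PCH8_HFS,
 *		MEI_CFG_FW_VER_SUPP,
 *		MEI_CFG_DMA_128,
 *	};
 *
 * is equivalent to spelling out .fw_status, .fw_ver_supported and
 * .dma_size by hand; "example_cfg" is hypothetical, the real tables
 * follow below.
 */
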
/* ICH Legacy devices */
static const struct mei_cfg mei_me_ich_cfg = {
	MEI_CFG_ICH_HFS,
};

/* ICH devices */
static const struct mei_cfg mei_me_ich10_cfg = {
	MEI_CFG_ICH10_HFS,
};

/* PCH6 devices */
static const struct mei_cfg mei_me_pch6_cfg = {
	MEI_CFG_PCH_HFS,
};

/* PCH7 devices */
static const struct mei_cfg mei_me_pch7_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_VER_SUPP,
};

/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
static const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_FW_NM,
};

/* PCH8 Lynx Point and newer devices */
static const struct mei_cfg mei_me_pch8_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
};

/* PCH8 Lynx Point and newer devices - iTouch */
static const struct mei_cfg mei_me_pch8_itouch_cfg = {
	MEI_CFG_KIND_ITOUCH,
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
};

/* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
static const struct mei_cfg mei_me_pch8_sps_4_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_FW_SPS_4,
};

/* LBG with quirk for SPS (4.0) Firmware exclusion */
static const struct mei_cfg mei_me_pch12_sps_4_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_FW_SPS_4,
};

/* Cannon Lake and newer devices */
static const struct mei_cfg mei_me_pch12_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_DMA_128,
};

/* Cannon Lake with quirk for SPS 5.0 and newer Firmware exclusion */
static const struct mei_cfg mei_me_pch12_sps_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_DMA_128,
	MEI_CFG_FW_SPS_IGN,
};

/* Cannon Lake itouch with quirk for SPS 5.0 and newer Firmware exclusion
 * w/o DMA support.
 */
static const struct mei_cfg mei_me_pch12_itouch_sps_cfg = {
	MEI_CFG_KIND_ITOUCH,
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_FW_SPS_IGN,
};

/* Tiger Lake and newer devices */
static const struct mei_cfg mei_me_pch15_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_DMA_128,
	MEI_CFG_TRC,
};

/* Tiger Lake with quirk for SPS 5.0 and newer Firmware exclusion */
static const struct mei_cfg mei_me_pch15_sps_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_DMA_128,
	MEI_CFG_TRC,
	MEI_CFG_FW_SPS_IGN,
};

/* Graphics System Controller */
static const struct mei_cfg mei_me_gsc_cfg = {
	MEI_CFG_TYPE_GSC,
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
};

/* Graphics System Controller Firmware Interface */
static const struct mei_cfg mei_me_gscfi_cfg = {
	MEI_CFG_TYPE_GSCFI,
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
};

/*
 * mei_cfg_list - A list of platform specific configurations.
 * Note: has to be synchronized with enum mei_cfg_idx.
 */
static const struct mei_cfg *const mei_cfg_list[] = {
	[MEI_ME_UNDEF_CFG] = NULL,
	[MEI_ME_ICH_CFG] = &mei_me_ich_cfg,
	[MEI_ME_ICH10_CFG] = &mei_me_ich10_cfg,
	[MEI_ME_PCH6_CFG] = &mei_me_pch6_cfg,
	[MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg,
	[MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
	[MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
	[MEI_ME_PCH8_ITOUCH_CFG] = &mei_me_pch8_itouch_cfg,
	[MEI_ME_PCH8_SPS_4_CFG] = &mei_me_pch8_sps_4_cfg,
	[MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
	[MEI_ME_PCH12_SPS_4_CFG] = &mei_me_pch12_sps_4_cfg,
	[MEI_ME_PCH12_SPS_CFG] = &mei_me_pch12_sps_cfg,
	[MEI_ME_PCH12_SPS_ITOUCH_CFG] = &mei_me_pch12_itouch_sps_cfg,
	[MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg,
	[MEI_ME_PCH15_SPS_CFG] = &mei_me_pch15_sps_cfg,
	[MEI_ME_GSC_CFG] = &mei_me_gsc_cfg,
	[MEI_ME_GSCFI_CFG] = &mei_me_gscfi_cfg,
};

const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
{
	BUILD_BUG_ON(ARRAY_SIZE(mei_cfg_list) != MEI_ME_NUM_CFG);

	if (idx >= MEI_ME_NUM_CFG)
		return NULL;

	return mei_cfg_list[idx];
}
EXPORT_SYMBOL_GPL(mei_me_get_cfg);

/**
 * mei_me_dev_init - allocates and initializes the mei device structure
 *
 * @parent: device associated with physical device (pci/platform)
 * @cfg: per device generation config
 * @slow_fw: configure longer timeouts as FW is slow
 *
 * Return: The mei_device pointer on success, NULL on failure.
 */
struct mei_device *mei_me_dev_init(struct device *parent,
				   const struct mei_cfg *cfg, bool slow_fw)
{
	struct mei_device *dev;
	struct mei_me_hw *hw;
	int i;

	dev = devm_kzalloc(parent, sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
	if (!dev)
		return NULL;

	hw = to_me_hw(dev);

	for (i = 0; i < DMA_DSCR_NUM; i++)
		dev->dr_dscr[i].size = cfg->dma_size[i];

	mei_device_init(dev, parent, slow_fw, &mei_me_hw_ops);
	hw->cfg = cfg;

	dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;

	dev->kind = cfg->kind;

	return dev;
}
EXPORT_SYMBOL_GPL(mei_me_dev_init);
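
/*
 * A usage sketch (editorial, simplified from how PCI glue code is expected
 * to call in): a probe routine resolves the per-generation config by the
 * index stored in its id table and then allocates the device, e.g.
 *
 *	const struct mei_cfg *cfg = mei_me_get_cfg(ent->driver_data);
 *	struct mei_device *dev;
 *
 *	if (!cfg)
 *		return -ENODEV;
 *	dev = mei_me_dev_init(&pdev->dev, cfg, false);
 *	if (!dev)
 *		return -ENOMEM;
 *
 * "ent" and "pdev" stand for the probe arguments and are assumptions here.
 */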
1826