xref: /linux/drivers/misc/mei/hw-me.c (revision cb4eb6771c0f8fd1c52a8f6fdec7762fb087380a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
4  * Intel Management Engine Interface (Intel MEI) Linux driver
5  */
6 
7 #include <linux/pci.h>
8 
9 #include <linux/kthread.h>
10 #include <linux/interrupt.h>
11 #include <linux/pm_runtime.h>
12 #include <linux/sizes.h>
13 #include <linux/delay.h>
14 
15 #include "mei_dev.h"
16 #include "hbm.h"
17 
18 #include "hw-me.h"
19 #include "hw-me-regs.h"
20 
21 #include "mei-trace.h"
22 
23 /**
24  * mei_me_reg_read - Reads 32bit data from the mei device
25  *
26  * @hw: the me hardware structure
27  * @offset: offset from which to read the data
28  *
29  * Return: register value (u32)
30  */
mei_me_reg_read(const struct mei_me_hw * hw,unsigned long offset)31 static inline u32 mei_me_reg_read(const struct mei_me_hw *hw,
32 			       unsigned long offset)
33 {
34 	return ioread32(hw->mem_addr + offset);
35 }
36 
37 
38 /**
39  * mei_me_reg_write - Writes 32bit data to the mei device
40  *
41  * @hw: the me hardware structure
42  * @offset: offset from which to write the data
43  * @value: register value to write (u32)
44  */
mei_me_reg_write(const struct mei_me_hw * hw,unsigned long offset,u32 value)45 static inline void mei_me_reg_write(const struct mei_me_hw *hw,
46 				 unsigned long offset, u32 value)
47 {
48 	iowrite32(value, hw->mem_addr + offset);
49 }
50 
51 /**
52  * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer
53  *  read window register
54  *
55  * @dev: the device structure
56  *
57  * Return: ME_CB_RW register value (u32)
58  */
mei_me_mecbrw_read(const struct mei_device * dev)59 static inline u32 mei_me_mecbrw_read(const struct mei_device *dev)
60 {
61 	return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
62 }
63 
64 /**
65  * mei_me_hcbww_write - write 32bit data to the host circular buffer
66  *
67  * @dev: the device structure
68  * @data: 32bit data to be written to the host circular buffer
69  */
mei_me_hcbww_write(struct mei_device * dev,u32 data)70 static inline void mei_me_hcbww_write(struct mei_device *dev, u32 data)
71 {
72 	mei_me_reg_write(to_me_hw(dev), H_CB_WW, data);
73 }
74 
75 /**
76  * mei_me_mecsr_read - Reads 32bit data from the ME CSR
77  *
78  * @dev: the device structure
79  *
80  * Return: ME_CSR_HA register value (u32)
81  */
mei_me_mecsr_read(const struct mei_device * dev)82 static inline u32 mei_me_mecsr_read(const struct mei_device *dev)
83 {
84 	u32 reg;
85 
86 	reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA);
87 	trace_mei_reg_read(&dev->dev, "ME_CSR_HA", ME_CSR_HA, reg);
88 
89 	return reg;
90 }
91 
92 /**
93  * mei_hcsr_read - Reads 32bit data from the host CSR
94  *
95  * @dev: the device structure
96  *
97  * Return: H_CSR register value (u32)
98  */
mei_hcsr_read(const struct mei_device * dev)99 static inline u32 mei_hcsr_read(const struct mei_device *dev)
100 {
101 	u32 reg;
102 
103 	reg = mei_me_reg_read(to_me_hw(dev), H_CSR);
104 	trace_mei_reg_read(&dev->dev, "H_CSR", H_CSR, reg);
105 
106 	return reg;
107 }
108 
109 /**
110  * mei_hcsr_write - writes H_CSR register to the mei device
111  *
112  * @dev: the device structure
113  * @reg: new register value
114  */
mei_hcsr_write(struct mei_device * dev,u32 reg)115 static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
116 {
117 	trace_mei_reg_write(&dev->dev, "H_CSR", H_CSR, reg);
118 	mei_me_reg_write(to_me_hw(dev), H_CSR, reg);
119 }
120 
121 /**
122  * mei_hcsr_set - writes H_CSR register to the mei device,
123  * and ignores the H_IS bit for it is write-one-to-zero.
124  *
125  * @dev: the device structure
126  * @reg: new register value
127  */
mei_hcsr_set(struct mei_device * dev,u32 reg)128 static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
129 {
130 	reg &= ~H_CSR_IS_MASK;
131 	mei_hcsr_write(dev, reg);
132 }
133 
134 /**
135  * mei_hcsr_set_hig - set host interrupt (set H_IG)
136  *
137  * @dev: the device structure
138  */
mei_hcsr_set_hig(struct mei_device * dev)139 static inline void mei_hcsr_set_hig(struct mei_device *dev)
140 {
141 	u32 hcsr;
142 
143 	hcsr = mei_hcsr_read(dev) | H_IG;
144 	mei_hcsr_set(dev, hcsr);
145 }
146 
147 /**
148  * mei_me_d0i3c_read - Reads 32bit data from the D0I3C register
149  *
150  * @dev: the device structure
151  *
152  * Return: H_D0I3C register value (u32)
153  */
mei_me_d0i3c_read(const struct mei_device * dev)154 static inline u32 mei_me_d0i3c_read(const struct mei_device *dev)
155 {
156 	u32 reg;
157 
158 	reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C);
159 	trace_mei_reg_read(&dev->dev, "H_D0I3C", H_D0I3C, reg);
160 
161 	return reg;
162 }
163 
164 /**
165  * mei_me_d0i3c_write - writes H_D0I3C register to device
166  *
167  * @dev: the device structure
168  * @reg: new register value
169  */
mei_me_d0i3c_write(struct mei_device * dev,u32 reg)170 static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
171 {
172 	trace_mei_reg_write(&dev->dev, "H_D0I3C", H_D0I3C, reg);
173 	mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg);
174 }
175 
176 /**
177  * mei_me_trc_status - read trc status register
178  *
179  * @dev: mei device
180  * @trc: trc status register value
181  *
182  * Return: 0 on success, error otherwise
183  */
mei_me_trc_status(struct mei_device * dev,u32 * trc)184 static int mei_me_trc_status(struct mei_device *dev, u32 *trc)
185 {
186 	struct mei_me_hw *hw = to_me_hw(dev);
187 
188 	if (!hw->cfg->hw_trc_supported)
189 		return -EOPNOTSUPP;
190 
191 	*trc = mei_me_reg_read(hw, ME_TRC);
192 	trace_mei_reg_read(&dev->dev, "ME_TRC", ME_TRC, *trc);
193 
194 	return 0;
195 }
196 
/**
 * mei_me_fw_status - read fw status register from pci config space
 *
 * @dev: mei device
 * @fw_status: fw status register values
 *
 * Return: 0 on success, error otherwise
 */
static int mei_me_fw_status(struct mei_device *dev,
			    struct mei_fw_status *fw_status)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	const struct mei_fw_status *fw_src = &hw->cfg->fw_status;
	int ret;
	int i;

	if (!fw_status || !hw->read_fws)
		return -EINVAL;

	/*
	 * NOTE(review): count is copied verbatim from the config; should it
	 * ever exceed MEI_FW_STATUS_MAX, the loop below would fill fewer
	 * entries than count advertises — presumably configs never do that;
	 * confirm.
	 */
	fw_status->count = fw_src->count;
	for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
		/* read_fws abstracts the platform-specific register access */
		ret = hw->read_fws(dev, fw_src->status[i], "PCI_CFG_HFS_X",
				   &fw_status->status[i]);
		if (ret)
			return ret;
	}

	return 0;
}
226 
mei_csc_pg_blocked(struct mei_device * dev)227 static bool mei_csc_pg_blocked(struct mei_device *dev)
228 {
229 	struct mei_me_hw *hw = to_me_hw(dev);
230 	u32 reg = 0;
231 
232 	hw->read_fws(dev, PCI_CFG_HFS_2, "PCI_CFG_HFS_2", &reg);
233 	return (reg & PCI_CFG_HFS_2_D3_BLOCK) == PCI_CFG_HFS_2_D3_BLOCK;
234 }
235 
/**
 * mei_me_hw_config - configure hw dependent settings
 *
 * @dev: mei device
 *
 * Return:
 *  * -EINVAL when read_fws is not set
 *  * 0 on success
 *
 */
static int mei_me_hw_config(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr, reg;

	if (WARN_ON(!hw->read_fws))
		return -EINVAL;

	/* Doesn't change in runtime */
	hcsr = mei_hcsr_read(dev);
	/* host circular buffer depth lives in the H_CBD field (bits 31:24) */
	hw->hbuf_depth = (hcsr & H_CBD) >> 24;

	reg = 0;
	hw->read_fws(dev, PCI_CFG_HFS_1, "PCI_CFG_HFS_1", &reg);
	/* d0i3 is supported only when every bit of the mask is set */
	hw->d0i3_supported =
		((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);

	/* default to PG off; on d0i3 hw trust the current D0I3C state */
	hw->pg_state = MEI_PG_OFF;
	if (hw->d0i3_supported) {
		reg = mei_me_d0i3c_read(dev);
		if (reg & H_D0I3C_I3)
			hw->pg_state = MEI_PG_ON;
	}

	return 0;
}
272 
273 /**
274  * mei_me_pg_state  - translate internal pg state
275  *   to the mei power gating state
276  *
277  * @dev:  mei device
278  *
279  * Return: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise
280  */
mei_me_pg_state(struct mei_device * dev)281 static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
282 {
283 	struct mei_me_hw *hw = to_me_hw(dev);
284 
285 	return hw->pg_state;
286 }
287 
/**
 * me_intr_src - extract the interrupt status bits from a hcsr value
 *
 * @hcsr: H_CSR register value
 *
 * Return: the interrupt source bits (H_CSR_IS_MASK subset)
 */
static inline u32 me_intr_src(u32 hcsr)
{
	return hcsr & H_CSR_IS_MASK;
}
292 
293 /**
294  * me_intr_disable - disables mei device interrupts
295  *      using supplied hcsr register value.
296  *
297  * @dev: the device structure
298  * @hcsr: supplied hcsr register value
299  */
me_intr_disable(struct mei_device * dev,u32 hcsr)300 static inline void me_intr_disable(struct mei_device *dev, u32 hcsr)
301 {
302 	hcsr &= ~H_CSR_IE_MASK;
303 	mei_hcsr_set(dev, hcsr);
304 }
305 
306 /**
307  * me_intr_clear - clear and stop interrupts
308  *
309  * @dev: the device structure
310  * @hcsr: supplied hcsr register value
311  */
me_intr_clear(struct mei_device * dev,u32 hcsr)312 static inline void me_intr_clear(struct mei_device *dev, u32 hcsr)
313 {
314 	if (me_intr_src(hcsr))
315 		mei_hcsr_write(dev, hcsr);
316 }
317 
/**
 * mei_me_intr_clear - clear and stop interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_clear(struct mei_device *dev)
{
	me_intr_clear(dev, mei_hcsr_read(dev));
}
329 /**
330  * mei_me_intr_enable - enables mei device interrupts
331  *
332  * @dev: the device structure
333  */
mei_me_intr_enable(struct mei_device * dev)334 static void mei_me_intr_enable(struct mei_device *dev)
335 {
336 	u32 hcsr;
337 
338 	if (mei_me_hw_use_polling(to_me_hw(dev)))
339 		return;
340 
341 	hcsr = mei_hcsr_read(dev) | H_CSR_IE_MASK;
342 	mei_hcsr_set(dev, hcsr);
343 }
344 
/**
 * mei_me_intr_disable - disable mei device interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_disable(struct mei_device *dev)
{
	me_intr_disable(dev, mei_hcsr_read(dev));
}
356 
357 /**
358  * mei_me_synchronize_irq - wait for pending IRQ handlers
359  *
360  * @dev: the device structure
361  */
mei_me_synchronize_irq(struct mei_device * dev)362 static void mei_me_synchronize_irq(struct mei_device *dev)
363 {
364 	struct mei_me_hw *hw = to_me_hw(dev);
365 
366 	if (mei_me_hw_use_polling(hw))
367 		return;
368 
369 	synchronize_irq(hw->irq);
370 }
371 
372 /**
373  * mei_me_hw_reset_release - release device from the reset
374  *
375  * @dev: the device structure
376  */
mei_me_hw_reset_release(struct mei_device * dev)377 static void mei_me_hw_reset_release(struct mei_device *dev)
378 {
379 	u32 hcsr = mei_hcsr_read(dev);
380 
381 	hcsr |= H_IG;
382 	hcsr &= ~H_RST;
383 	mei_hcsr_set(dev, hcsr);
384 }
385 
386 /**
387  * mei_me_host_set_ready - enable device
388  *
389  * @dev: mei device
390  */
mei_me_host_set_ready(struct mei_device * dev)391 static void mei_me_host_set_ready(struct mei_device *dev)
392 {
393 	u32 hcsr = mei_hcsr_read(dev);
394 
395 	if (!mei_me_hw_use_polling(to_me_hw(dev)))
396 		hcsr |= H_CSR_IE_MASK;
397 
398 	hcsr |=  H_IG | H_RDY;
399 	mei_hcsr_set(dev, hcsr);
400 }
401 
402 /**
403  * mei_me_host_is_ready - check whether the host has turned ready
404  *
405  * @dev: mei device
406  * Return: bool
407  */
mei_me_host_is_ready(struct mei_device * dev)408 static bool mei_me_host_is_ready(struct mei_device *dev)
409 {
410 	u32 hcsr = mei_hcsr_read(dev);
411 
412 	return (hcsr & H_RDY) == H_RDY;
413 }
414 
415 /**
416  * mei_me_hw_is_ready - check whether the me(hw) has turned ready
417  *
418  * @dev: mei device
419  * Return: bool
420  */
mei_me_hw_is_ready(struct mei_device * dev)421 static bool mei_me_hw_is_ready(struct mei_device *dev)
422 {
423 	u32 mecsr = mei_me_mecsr_read(dev);
424 
425 	return (mecsr & ME_RDY_HRA) == ME_RDY_HRA;
426 }
427 
428 /**
429  * mei_me_hw_is_resetting - check whether the me(hw) is in reset
430  *
431  * @dev: mei device
432  * Return: bool
433  */
mei_me_hw_is_resetting(struct mei_device * dev)434 static bool mei_me_hw_is_resetting(struct mei_device *dev)
435 {
436 	u32 mecsr = mei_me_mecsr_read(dev);
437 
438 	return (mecsr & ME_RST_HRA) == ME_RST_HRA;
439 }
440 
441 /**
442  * mei_gsc_pxp_check - check for gsc firmware entering pxp mode
443  *
444  * @dev: the device structure
445  */
mei_gsc_pxp_check(struct mei_device * dev)446 static void mei_gsc_pxp_check(struct mei_device *dev)
447 {
448 	struct mei_me_hw *hw = to_me_hw(dev);
449 	u32 fwsts5 = 0;
450 
451 	if (!kind_is_gsc(dev) && !kind_is_gscfi(dev))
452 		return;
453 
454 	hw->read_fws(dev, PCI_CFG_HFS_5, "PCI_CFG_HFS_5", &fwsts5);
455 
456 	if ((fwsts5 & GSC_CFG_HFS_5_BOOT_TYPE_MSK) == GSC_CFG_HFS_5_BOOT_TYPE_PXP) {
457 		if (dev->gsc_reset_to_pxp == MEI_DEV_RESET_TO_PXP_DEFAULT)
458 			dev->gsc_reset_to_pxp = MEI_DEV_RESET_TO_PXP_PERFORMED;
459 	} else {
460 		dev->gsc_reset_to_pxp = MEI_DEV_RESET_TO_PXP_DEFAULT;
461 	}
462 
463 	if (dev->pxp_mode == MEI_DEV_PXP_DEFAULT)
464 		return;
465 
466 	if ((fwsts5 & GSC_CFG_HFS_5_BOOT_TYPE_MSK) == GSC_CFG_HFS_5_BOOT_TYPE_PXP) {
467 		dev_dbg(&dev->dev, "pxp mode is ready 0x%08x\n", fwsts5);
468 		dev->pxp_mode = MEI_DEV_PXP_READY;
469 	} else {
470 		dev_dbg(&dev->dev, "pxp mode is not ready 0x%08x\n", fwsts5);
471 	}
472 }
473 
/**
 * mei_me_hw_ready_wait - wait until the me(hw) has turned ready
 *  or timeout is reached
 *
 * @dev: mei device
 * Return: 0 on success, -ETIME on timeout
 */
static int mei_me_hw_ready_wait(struct mei_device *dev)
{
	/* drop the device lock while sleeping so the interrupt handler
	 * can set recvd_hw_ready and wake us up */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_hw_ready,
			dev->recvd_hw_ready,
			dev->timeouts.hw_ready);
	mutex_lock(&dev->device_lock);
	if (!dev->recvd_hw_ready) {
		dev_err(&dev->dev, "wait hw ready failed\n");
		return -ETIME;
	}

	/* sample the gsc pxp boot state before releasing the reset */
	mei_gsc_pxp_check(dev);

	mei_me_hw_reset_release(dev);
	/* consume the ready event for the next reset cycle */
	dev->recvd_hw_ready = false;
	return 0;
}
499 
/**
 * mei_me_hw_start - hw start routine
 *
 * @dev: mei device
 * Return: 0 on success, error otherwise
 */
static int mei_me_hw_start(struct mei_device *dev)
{
	int ret = mei_me_hw_ready_wait(dev);

	/*
	 * NOTE(review): the gsc reset-to-pxp bookkeeping is advanced before
	 * the error check below, i.e. even when the ready wait timed out —
	 * presumably intentional; confirm.
	 */
	if ((kind_is_gsc(dev) || kind_is_gscfi(dev)) &&
	    dev->gsc_reset_to_pxp == MEI_DEV_RESET_TO_PXP_PERFORMED)
		dev->gsc_reset_to_pxp = MEI_DEV_RESET_TO_PXP_DONE;
	if (ret)
		return ret;
	dev_dbg(&dev->dev, "hw is ready\n");

	mei_me_host_set_ready(dev);
	return ret;
}
520 
521 
/**
 * mei_hbuf_filled_slots - gets number of device filled buffer slots
 *
 * @dev: the device structure
 *
 * Return: number of filled slots
 */
static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
{
	u32 hcsr;
	char read_ptr, write_ptr;

	hcsr = mei_hcsr_read(dev);

	/* circular buffer pointers from H_CSR:
	 * read pointer at bits 15:8, write pointer at bits 23:16 */
	read_ptr = (char) ((hcsr & H_CBRP) >> 8);
	write_ptr = (char) ((hcsr & H_CBWP) >> 16);

	/* the pointers wrap around; the 8-bit subtraction yields the
	 * fill level modulo 256 */
	return (unsigned char) (write_ptr - read_ptr);
}
541 
542 /**
543  * mei_me_hbuf_is_empty - checks if host buffer is empty.
544  *
545  * @dev: the device structure
546  *
547  * Return: true if empty, false - otherwise.
548  */
mei_me_hbuf_is_empty(struct mei_device * dev)549 static bool mei_me_hbuf_is_empty(struct mei_device *dev)
550 {
551 	return mei_hbuf_filled_slots(dev) == 0;
552 }
553 
554 /**
555  * mei_me_hbuf_empty_slots - counts write empty slots.
556  *
557  * @dev: the device structure
558  *
559  * Return: -EOVERFLOW if overflow, otherwise empty slots count
560  */
mei_me_hbuf_empty_slots(struct mei_device * dev)561 static int mei_me_hbuf_empty_slots(struct mei_device *dev)
562 {
563 	struct mei_me_hw *hw = to_me_hw(dev);
564 	unsigned char filled_slots, empty_slots;
565 
566 	filled_slots = mei_hbuf_filled_slots(dev);
567 	empty_slots = hw->hbuf_depth - filled_slots;
568 
569 	/* check for overflow */
570 	if (filled_slots > hw->hbuf_depth)
571 		return -EOVERFLOW;
572 
573 	return empty_slots;
574 }
575 
576 /**
577  * mei_me_hbuf_depth - returns depth of the hw buffer.
578  *
579  * @dev: the device structure
580  *
581  * Return: size of hw buffer in slots
582  */
mei_me_hbuf_depth(const struct mei_device * dev)583 static u32 mei_me_hbuf_depth(const struct mei_device *dev)
584 {
585 	struct mei_me_hw *hw = to_me_hw(dev);
586 
587 	return hw->hbuf_depth;
588 }
589 
/**
 * mei_me_hbuf_write - writes a message to host hw buffer.
 *
 * @dev: the device structure
 * @hdr: header of message
 * @hdr_len: header length in bytes: must be multiplication of a slot (4bytes)
 * @data: payload
 * @data_len: payload length in bytes
 *
 * Return: 0 if success, < 0 - otherwise.
 */
static int mei_me_hbuf_write(struct mei_device *dev,
			     const void *hdr, size_t hdr_len,
			     const void *data, size_t data_len)
{
	unsigned long rem;
	unsigned long i;
	const u32 *reg_buf;
	u32 dw_cnt;
	int empty_slots;

	/* the header must exist and be slot (4 byte) aligned */
	if (WARN_ON(!hdr || hdr_len & 0x3))
		return -EINVAL;

	if (!data && data_len) {
		dev_err(&dev->dev, "wrong parameters null data with data_len = %zu\n", data_len);
		return -EINVAL;
	}

	dev_dbg(&dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));

	empty_slots = mei_hbuf_empty_slots(dev);
	dev_dbg(&dev->dev, "empty slots = %d.\n", empty_slots);

	if (empty_slots < 0)
		return -EOVERFLOW;

	/* the whole message (header + payload) must fit at once */
	dw_cnt = mei_data2slots(hdr_len + data_len);
	if (dw_cnt > (u32)empty_slots)
		return -EMSGSIZE;

	/* push the header, one 32-bit slot at a time */
	reg_buf = hdr;
	for (i = 0; i < hdr_len / MEI_SLOT_SIZE; i++)
		mei_me_hcbww_write(dev, reg_buf[i]);

	/* push all whole slots of the payload */
	reg_buf = data;
	for (i = 0; i < data_len / MEI_SLOT_SIZE; i++)
		mei_me_hcbww_write(dev, reg_buf[i]);

	/* pad the trailing 1-3 payload bytes, if any, into a zeroed slot */
	rem = data_len & 0x3;
	if (rem > 0) {
		u32 reg = 0;

		memcpy(&reg, (const u8 *)data + data_len - rem, rem);
		mei_me_hcbww_write(dev, reg);
	}

	/* raise H_IG to notify the fw that data was written */
	mei_hcsr_set_hig(dev);
	if (!mei_me_hw_is_ready(dev))
		return -EIO;

	return 0;
}
653 
/**
 * mei_me_count_full_read_slots - counts read full slots.
 *
 * @dev: the device structure
 *
 * Return: -EOVERFLOW if overflow, otherwise filled slots count
 */
static int mei_me_count_full_read_slots(struct mei_device *dev)
{
	u32 me_csr;
	char read_ptr, write_ptr;
	unsigned char buffer_depth, filled_slots;

	me_csr = mei_me_mecsr_read(dev);
	/* ME-side circular buffer geometry from ME_CSR_HA:
	 * depth at bits 31:24, read ptr at 15:8, write ptr at 23:16 */
	buffer_depth = (unsigned char)((me_csr & ME_CBD_HRA) >> 24);
	read_ptr = (char) ((me_csr & ME_CBRP_HRA) >> 8);
	write_ptr = (char) ((me_csr & ME_CBWP_HRA) >> 16);
	/* the pointers wrap; 8-bit subtraction gives fill level mod 256 */
	filled_slots = (unsigned char) (write_ptr - read_ptr);

	/* check for overflow */
	if (filled_slots > buffer_depth)
		return -EOVERFLOW;

	dev_dbg(&dev->dev, "filled_slots =%08x\n", filled_slots);
	return (int)filled_slots;
}
680 
681 /**
682  * mei_me_read_slots - reads a message from mei device.
683  *
684  * @dev: the device structure
685  * @buffer: message buffer will be written
686  * @buffer_length: message size will be read
687  *
688  * Return: always 0
689  */
mei_me_read_slots(struct mei_device * dev,unsigned char * buffer,unsigned long buffer_length)690 static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
691 			     unsigned long buffer_length)
692 {
693 	u32 *reg_buf = (u32 *)buffer;
694 
695 	for (; buffer_length >= MEI_SLOT_SIZE; buffer_length -= MEI_SLOT_SIZE)
696 		*reg_buf++ = mei_me_mecbrw_read(dev);
697 
698 	if (buffer_length > 0) {
699 		u32 reg = mei_me_mecbrw_read(dev);
700 
701 		memcpy(reg_buf, &reg, buffer_length);
702 	}
703 
704 	mei_hcsr_set_hig(dev);
705 	return 0;
706 }
707 
708 /**
709  * mei_me_pg_set - write pg enter register
710  *
711  * @dev: the device structure
712  */
mei_me_pg_set(struct mei_device * dev)713 static void mei_me_pg_set(struct mei_device *dev)
714 {
715 	struct mei_me_hw *hw = to_me_hw(dev);
716 	u32 reg;
717 
718 	reg = mei_me_reg_read(hw, H_HPG_CSR);
719 	trace_mei_reg_read(&dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
720 
721 	reg |= H_HPG_CSR_PGI;
722 
723 	trace_mei_reg_write(&dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
724 	mei_me_reg_write(hw, H_HPG_CSR, reg);
725 }
726 
727 /**
728  * mei_me_pg_unset - write pg exit register
729  *
730  * @dev: the device structure
731  */
mei_me_pg_unset(struct mei_device * dev)732 static void mei_me_pg_unset(struct mei_device *dev)
733 {
734 	struct mei_me_hw *hw = to_me_hw(dev);
735 	u32 reg;
736 
737 	reg = mei_me_reg_read(hw, H_HPG_CSR);
738 	trace_mei_reg_read(&dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
739 
740 	WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");
741 
742 	reg |= H_HPG_CSR_PGIHEXR;
743 
744 	trace_mei_reg_write(&dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
745 	mei_me_reg_write(hw, H_HPG_CSR, reg);
746 }
747 
/**
 * mei_me_pg_legacy_enter_sync - perform legacy pg entry procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	int ret;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	/* ask the fw for permission to enter power gating */
	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
	if (ret)
		return ret;

	/* drop the lock so the interrupt thread can update pg_event */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED,
		dev->timeouts.pgi);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
		/* fw agreed: commit the entry via H_HPG_CSR */
		mei_me_pg_set(dev);
		ret = 0;
	} else {
		ret = -ETIME;
	}

	dev->pg_event = MEI_PG_EVENT_IDLE;
	/* NOTE(review): pg_state is set to ON even on timeout — presumably
	 * the caller resets on error; confirm this is intended */
	hw->pg_state = MEI_PG_ON;

	return ret;
}
784 
/**
 * mei_me_pg_legacy_exit_sync - perform legacy pg exit procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	int ret;

	/* the PG exit event may have been delivered already */
	if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
		goto reply;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	mei_me_pg_unset(dev);

	/* drop the lock so the interrupt thread can update pg_event */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED,
		dev->timeouts.pgi);
	mutex_lock(&dev->device_lock);

reply:
	if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		ret = -ETIME;
		goto out;
	}

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
	if (ret)
		/* NOTE(review): this early return leaves pg_event at
		 * INTR_WAIT and pg_state unchanged — presumably the caller
		 * resets the device on error; confirm */
		return ret;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
		dev->timeouts.pgi);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
		ret = 0;
	else
		ret = -ETIME;

out:
	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_OFF;

	return ret;
}
838 
/**
 * mei_me_pg_in_transition - is device now in pg transition
 *
 * @dev: the device structure
 *
 * Return: true if in pg transition, false otherwise
 */
static bool mei_me_pg_in_transition(struct mei_device *dev)
{
	/* WAIT..INTR_WAIT form a contiguous range of transitional states */
	return dev->pg_event >= MEI_PG_EVENT_WAIT &&
	       dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
}
851 
852 /**
853  * mei_me_pg_is_enabled - detect if PG is supported by HW
854  *
855  * @dev: the device structure
856  *
857  * Return: true is pg supported, false otherwise
858  */
mei_me_pg_is_enabled(struct mei_device * dev)859 static bool mei_me_pg_is_enabled(struct mei_device *dev)
860 {
861 	struct mei_me_hw *hw = to_me_hw(dev);
862 	u32 reg = mei_me_mecsr_read(dev);
863 
864 	if (hw->d0i3_supported)
865 		return true;
866 
867 	if ((reg & ME_PGIC_HRA) == 0)
868 		goto notsupported;
869 
870 	if (!dev->hbm_f_pg_supported)
871 		goto notsupported;
872 
873 	return true;
874 
875 notsupported:
876 	dev_dbg(&dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n",
877 		hw->d0i3_supported,
878 		!!(reg & ME_PGIC_HRA),
879 		dev->version.major_version,
880 		dev->version.minor_version,
881 		HBM_MAJOR_VERSION_PGI,
882 		HBM_MINOR_VERSION_PGI);
883 
884 	return false;
885 }
886 
887 /**
888  * mei_me_d0i3_set - write d0i3 register bit on mei device.
889  *
890  * @dev: the device structure
891  * @intr: ask for interrupt
892  *
893  * Return: D0I3C register value
894  */
mei_me_d0i3_set(struct mei_device * dev,bool intr)895 static u32 mei_me_d0i3_set(struct mei_device *dev, bool intr)
896 {
897 	u32 reg = mei_me_d0i3c_read(dev);
898 
899 	reg |= H_D0I3C_I3;
900 	if (intr)
901 		reg |= H_D0I3C_IR;
902 	else
903 		reg &= ~H_D0I3C_IR;
904 	mei_me_d0i3c_write(dev, reg);
905 	/* read it to ensure HW consistency */
906 	reg = mei_me_d0i3c_read(dev);
907 	return reg;
908 }
909 
910 /**
911  * mei_me_d0i3_unset - clean d0i3 register bit on mei device.
912  *
913  * @dev: the device structure
914  *
915  * Return: D0I3C register value
916  */
mei_me_d0i3_unset(struct mei_device * dev)917 static u32 mei_me_d0i3_unset(struct mei_device *dev)
918 {
919 	u32 reg = mei_me_d0i3c_read(dev);
920 
921 	reg &= ~H_D0I3C_I3;
922 	reg |= H_D0I3C_IR;
923 	mei_me_d0i3c_write(dev, reg);
924 	/* read it to ensure HW consistency */
925 	reg = mei_me_d0i3c_read(dev);
926 	return reg;
927 }
928 
/**
 * mei_me_d0i3_enter_sync - perform d0i3 entry procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_d0i3_enter_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	int ret;
	u32 reg;

	reg = mei_me_d0i3c_read(dev);
	if (reg & H_D0I3C_I3) {
		/* we are in d0i3, nothing to do */
		dev_dbg(&dev->dev, "d0i3 set not needed\n");
		ret = 0;
		goto on;
	}

	/* PGI entry procedure */
	dev->pg_event = MEI_PG_EVENT_WAIT;

	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
	if (ret)
		/* FIXME: should we reset here? */
		goto out;

	/* drop the lock while waiting so the interrupt thread can
	 * deliver the PG response and update pg_event */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED,
		dev->timeouts.pgi);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		ret = -ETIME;
		goto out;
	}
	/* end PGI entry procedure */

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

	/* request d0i3 with a completion interrupt */
	reg = mei_me_d0i3_set(dev, true);
	if (!(reg & H_D0I3C_CIP)) {
		/* no command in progress: the transition completed at once */
		dev_dbg(&dev->dev, "d0i3 enter wait not needed\n");
		ret = 0;
		goto on;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
		dev->timeouts.d0i3);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
		/* the interrupt may have been missed; trust the register */
		reg = mei_me_d0i3c_read(dev);
		if (!(reg & H_D0I3C_I3)) {
			ret = -ETIME;
			goto out;
		}
	}

	ret = 0;
on:
	hw->pg_state = MEI_PG_ON;
out:
	dev->pg_event = MEI_PG_EVENT_IDLE;
	dev_dbg(&dev->dev, "d0i3 enter ret = %d\n", ret);
	return ret;
}
1001 
/**
 * mei_me_d0i3_enter - perform d0i3 entry procedure
 *   no hbm PG handshake
 *   no waiting for confirmation; runs with interrupts
 *   disabled
 *
 * @dev: the device structure
 *
 * Return: always 0
 */
static int mei_me_d0i3_enter(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg;

	reg = mei_me_d0i3c_read(dev);
	if (reg & H_D0I3C_I3) {
		/* we are in d0i3, nothing to do */
		dev_dbg(&dev->dev, "already d0i3 : set not needed\n");
		goto on;
	}

	/* fire-and-forget: no interrupt requested, no completion wait */
	mei_me_d0i3_set(dev, false);
on:
	hw->pg_state = MEI_PG_ON;
	dev->pg_event = MEI_PG_EVENT_IDLE;
	dev_dbg(&dev->dev, "d0i3 enter\n");
	return 0;
}
1031 
/**
 * mei_me_d0i3_exit_sync - perform d0i3 exit procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_d0i3_exit_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	int ret;
	u32 reg;

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

	reg = mei_me_d0i3c_read(dev);
	if (!(reg & H_D0I3C_I3)) {
		/* we are not in d0i3, nothing to do */
		dev_dbg(&dev->dev, "d0i3 exit not needed\n");
		ret = 0;
		goto off;
	}

	/* request exit; CIP set means the hw needs time to complete */
	reg = mei_me_d0i3_unset(dev);
	if (!(reg & H_D0I3C_CIP)) {
		dev_dbg(&dev->dev, "d0i3 exit wait not needed\n");
		ret = 0;
		goto off;
	}

	/* drop the lock while waiting for the d0i3 exit interrupt */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
		dev->timeouts.d0i3);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
		/* the interrupt may have been missed; trust the register */
		reg = mei_me_d0i3c_read(dev);
		if (reg & H_D0I3C_I3) {
			ret = -ETIME;
			goto out;
		}
	}

	ret = 0;
off:
	hw->pg_state = MEI_PG_OFF;
out:
	dev->pg_event = MEI_PG_EVENT_IDLE;

	dev_dbg(&dev->dev, "d0i3 exit ret = %d\n", ret);
	return ret;
}
1085 
1086 /**
1087  * mei_me_pg_legacy_intr - perform legacy pg processing
1088  *			   in interrupt thread handler
1089  *
1090  * @dev: the device structure
1091  */
mei_me_pg_legacy_intr(struct mei_device * dev)1092 static void mei_me_pg_legacy_intr(struct mei_device *dev)
1093 {
1094 	struct mei_me_hw *hw = to_me_hw(dev);
1095 
1096 	if (dev->pg_event != MEI_PG_EVENT_INTR_WAIT)
1097 		return;
1098 
1099 	dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
1100 	hw->pg_state = MEI_PG_OFF;
1101 	if (waitqueue_active(&dev->wait_pg))
1102 		wake_up(&dev->wait_pg);
1103 }
1104 
/**
 * mei_me_d0i3_intr - perform d0i3 processing in interrupt thread handler
 *
 * @dev: the device structure
 * @intr_source: interrupt source
 */
static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	/* a d0i3 transition completed: flip pg_state and wake the waiter */
	if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT &&
	    (intr_source & H_D0I3C_IS)) {
		dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
		if (hw->pg_state == MEI_PG_ON) {
			hw->pg_state = MEI_PG_OFF;
			if (dev->hbm_state != MEI_HBM_IDLE) {
				/*
				 * force H_RDY because it could be
				 * wiped off during PG
				 */
				dev_dbg(&dev->dev, "d0i3 set host ready\n");
				mei_me_host_set_ready(dev);
			}
		} else {
			hw->pg_state = MEI_PG_ON;
		}

		wake_up(&dev->wait_pg);
	}

	if (hw->pg_state == MEI_PG_ON && (intr_source & H_IS)) {
		/*
		 * HW sent some data and we are in D0i3, so
		 * we got here because of HW initiated exit from D0i3.
		 * Start runtime pm resume sequence to exit low power state.
		 */
		dev_dbg(&dev->dev, "d0i3 want resume\n");
		mei_hbm_pg_resume(dev);
	}
}
1145 
1146 /**
1147  * mei_me_pg_intr - perform pg processing in interrupt thread handler
1148  *
1149  * @dev: the device structure
1150  * @intr_source: interrupt source
1151  */
mei_me_pg_intr(struct mei_device * dev,u32 intr_source)1152 static void mei_me_pg_intr(struct mei_device *dev, u32 intr_source)
1153 {
1154 	struct mei_me_hw *hw = to_me_hw(dev);
1155 
1156 	if (hw->d0i3_supported)
1157 		mei_me_d0i3_intr(dev, intr_source);
1158 	else
1159 		mei_me_pg_legacy_intr(dev);
1160 }
1161 
1162 /**
1163  * mei_me_pg_enter_sync - perform runtime pm entry procedure
1164  *
1165  * @dev: the device structure
1166  *
1167  * Return: 0 on success an error code otherwise
1168  */
mei_me_pg_enter_sync(struct mei_device * dev)1169 int mei_me_pg_enter_sync(struct mei_device *dev)
1170 {
1171 	struct mei_me_hw *hw = to_me_hw(dev);
1172 
1173 	if (hw->d0i3_supported)
1174 		return mei_me_d0i3_enter_sync(dev);
1175 	else
1176 		return mei_me_pg_legacy_enter_sync(dev);
1177 }
1178 
1179 /**
1180  * mei_me_pg_exit_sync - perform runtime pm exit procedure
1181  *
1182  * @dev: the device structure
1183  *
1184  * Return: 0 on success an error code otherwise
1185  */
mei_me_pg_exit_sync(struct mei_device * dev)1186 int mei_me_pg_exit_sync(struct mei_device *dev)
1187 {
1188 	struct mei_me_hw *hw = to_me_hw(dev);
1189 
1190 	if (hw->d0i3_supported)
1191 		return mei_me_d0i3_exit_sync(dev);
1192 	else
1193 		return mei_me_pg_legacy_exit_sync(dev);
1194 }
1195 
1196 /**
1197  * mei_me_hw_reset - resets fw via mei csr register.
1198  *
1199  * @dev: the device structure
1200  * @intr_enable: if interrupt should be enabled after reset.
1201  *
1202  * Return: 0 on success an error code otherwise
1203  */
mei_me_hw_reset(struct mei_device * dev,bool intr_enable)1204 static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
1205 {
1206 	struct mei_me_hw *hw = to_me_hw(dev);
1207 	int ret;
1208 	u32 hcsr;
1209 
1210 	if (intr_enable) {
1211 		mei_me_intr_enable(dev);
1212 		if (hw->d0i3_supported) {
1213 			ret = mei_me_d0i3_exit_sync(dev);
1214 			if (ret)
1215 				return ret;
1216 		} else {
1217 			hw->pg_state = MEI_PG_OFF;
1218 			dev->pg_blocked = mei_csc_pg_blocked(dev);
1219 		}
1220 	}
1221 
1222 	pm_runtime_set_active(dev->parent);
1223 
1224 	hcsr = mei_hcsr_read(dev);
1225 	/* H_RST may be found lit before reset is started,
1226 	 * for example if preceding reset flow hasn't completed.
1227 	 * In that case asserting H_RST will be ignored, therefore
1228 	 * we need to clean H_RST bit to start a successful reset sequence.
1229 	 */
1230 	if ((hcsr & H_RST) == H_RST) {
1231 		dev_warn(&dev->dev, "H_RST is set = 0x%08X", hcsr);
1232 		hcsr &= ~H_RST;
1233 		mei_hcsr_set(dev, hcsr);
1234 		hcsr = mei_hcsr_read(dev);
1235 	}
1236 
1237 	hcsr |= H_RST | H_IG | H_CSR_IS_MASK;
1238 
1239 	if (!intr_enable || mei_me_hw_use_polling(to_me_hw(dev)))
1240 		hcsr &= ~H_CSR_IE_MASK;
1241 
1242 	dev->recvd_hw_ready = false;
1243 	mei_hcsr_write(dev, hcsr);
1244 
1245 	/*
1246 	 * Host reads the H_CSR once to ensure that the
1247 	 * posted write to H_CSR completes.
1248 	 */
1249 	hcsr = mei_hcsr_read(dev);
1250 
1251 	if ((hcsr & H_RST) == 0)
1252 		dev_warn(&dev->dev, "H_RST is not set = 0x%08X", hcsr);
1253 
1254 	if ((hcsr & H_RDY) == H_RDY)
1255 		dev_warn(&dev->dev, "H_RDY is not cleared 0x%08X", hcsr);
1256 
1257 	if (!intr_enable) {
1258 		mei_me_hw_reset_release(dev);
1259 		if (hw->d0i3_supported) {
1260 			ret = mei_me_d0i3_enter(dev);
1261 			if (ret)
1262 				return ret;
1263 		}
1264 	}
1265 	return 0;
1266 }
1267 
1268 /**
1269  * mei_me_irq_quick_handler - The ISR of the MEI device
1270  *
1271  * @irq: The irq number
1272  * @dev_id: pointer to the device structure
1273  *
1274  * Return: irqreturn_t
1275  */
mei_me_irq_quick_handler(int irq,void * dev_id)1276 irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
1277 {
1278 	struct mei_device *dev = (struct mei_device *)dev_id;
1279 	u32 hcsr;
1280 
1281 	hcsr = mei_hcsr_read(dev);
1282 	if (!me_intr_src(hcsr))
1283 		return IRQ_NONE;
1284 
1285 	dev_dbg(&dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr));
1286 
1287 	/* disable interrupts on device */
1288 	me_intr_disable(dev, hcsr);
1289 	return IRQ_WAKE_THREAD;
1290 }
1291 EXPORT_SYMBOL_GPL(mei_me_irq_quick_handler);
1292 
1293 /**
1294  * mei_me_irq_thread_handler - function called after ISR to handle the interrupt
1295  * processing.
1296  *
1297  * @irq: The irq number
1298  * @dev_id: pointer to the device structure
1299  *
1300  * Return: irqreturn_t
1301  *
1302  */
mei_me_irq_thread_handler(int irq,void * dev_id)1303 irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
1304 {
1305 	struct mei_device *dev = (struct mei_device *) dev_id;
1306 	struct list_head cmpl_list;
1307 	bool pg_blocked;
1308 	s32 slots;
1309 	u32 hcsr;
1310 	int rets = 0;
1311 
1312 	dev_dbg(&dev->dev, "function called after ISR to handle the interrupt processing.\n");
1313 	/* initialize our complete list */
1314 	mutex_lock(&dev->device_lock);
1315 
1316 	hcsr = mei_hcsr_read(dev);
1317 	me_intr_clear(dev, hcsr);
1318 
1319 	INIT_LIST_HEAD(&cmpl_list);
1320 
1321 	/* check if ME wants a reset */
1322 	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
1323 		if (kind_is_gsc(dev) || kind_is_gscfi(dev)) {
1324 			dev_dbg(&dev->dev, "FW not ready: resetting: dev_state = %d\n",
1325 				dev->dev_state);
1326 		} else {
1327 			dev_warn(&dev->dev, "FW not ready: resetting: dev_state = %d\n",
1328 				 dev->dev_state);
1329 		}
1330 		if (dev->dev_state == MEI_DEV_POWERING_DOWN ||
1331 		    dev->dev_state == MEI_DEV_POWER_DOWN)
1332 			mei_cl_all_disconnect(dev);
1333 		else if (dev->dev_state != MEI_DEV_DISABLED)
1334 			schedule_work(&dev->reset_work);
1335 		goto end;
1336 	}
1337 
1338 	if (mei_me_hw_is_resetting(dev))
1339 		mei_hcsr_set_hig(dev);
1340 
1341 	mei_me_pg_intr(dev, me_intr_src(hcsr));
1342 
1343 	/*  check if we need to start the dev */
1344 	if (!mei_host_is_ready(dev)) {
1345 		if (mei_hw_is_ready(dev)) {
1346 			if (dev->dev_state == MEI_DEV_ENABLED) {
1347 				dev_dbg(&dev->dev, "Force link reset.\n");
1348 				schedule_work(&dev->reset_work);
1349 			} else {
1350 				dev_dbg(&dev->dev, "we need to start the dev.\n");
1351 				dev->recvd_hw_ready = true;
1352 				wake_up(&dev->wait_hw_ready);
1353 			}
1354 		} else {
1355 			dev_dbg(&dev->dev, "Spurious Interrupt\n");
1356 		}
1357 		goto end;
1358 	}
1359 
1360 	pg_blocked = mei_csc_pg_blocked(dev);
1361 	if (pg_blocked && !dev->pg_blocked) /* PG block requested */
1362 		pm_request_resume(&dev->dev);
1363 	else if (!pg_blocked && dev->pg_blocked) /* PG block lifted */
1364 		pm_request_autosuspend(&dev->dev);
1365 	dev->pg_blocked = pg_blocked;
1366 
1367 	/* check slots available for reading */
1368 	slots = mei_count_full_read_slots(dev);
1369 	while (slots > 0) {
1370 		dev_dbg(&dev->dev, "slots to read = %08x\n", slots);
1371 		rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
1372 		/* There is a race between ME write and interrupt delivery:
1373 		 * Not all data is always available immediately after the
1374 		 * interrupt, so try to read again on the next interrupt.
1375 		 */
1376 		if (rets == -ENODATA)
1377 			break;
1378 
1379 		if (rets) {
1380 			dev_err(&dev->dev, "mei_irq_read_handler ret = %d, state = %d.\n",
1381 				rets, dev->dev_state);
1382 			if (dev->dev_state != MEI_DEV_RESETTING &&
1383 			    dev->dev_state != MEI_DEV_DISABLED &&
1384 			    dev->dev_state != MEI_DEV_POWERING_DOWN &&
1385 			    dev->dev_state != MEI_DEV_POWER_DOWN)
1386 				schedule_work(&dev->reset_work);
1387 			goto end;
1388 		}
1389 	}
1390 
1391 	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
1392 
1393 	/*
1394 	 * During PG handshake only allowed write is the replay to the
1395 	 * PG exit message, so block calling write function
1396 	 * if the pg event is in PG handshake
1397 	 */
1398 	if (dev->pg_event != MEI_PG_EVENT_WAIT &&
1399 	    dev->pg_event != MEI_PG_EVENT_RECEIVED) {
1400 		rets = mei_irq_write_handler(dev, &cmpl_list);
1401 		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
1402 	}
1403 
1404 	mei_irq_compl_handler(dev, &cmpl_list);
1405 
1406 end:
1407 	dev_dbg(&dev->dev, "interrupt thread end ret = %d\n", rets);
1408 	mei_me_intr_enable(dev);
1409 	mutex_unlock(&dev->device_lock);
1410 	return IRQ_HANDLED;
1411 }
1412 EXPORT_SYMBOL_GPL(mei_me_irq_thread_handler);
1413 
1414 #define MEI_POLLING_TIMEOUT_ACTIVE 100
1415 #define MEI_POLLING_TIMEOUT_IDLE   500
1416 
1417 /**
1418  * mei_me_polling_thread - interrupt register polling thread
1419  *
1420  * @_dev: mei device
1421  *
1422  * The thread monitors the interrupt source register and calls
1423  * mei_me_irq_thread_handler() to handle the firmware
1424  * input.
1425  *
1426  * The function polls in MEI_POLLING_TIMEOUT_ACTIVE timeout
1427  * in case there was an event, in idle case the polling
1428  * time increases yet again by MEI_POLLING_TIMEOUT_ACTIVE
1429  * up to MEI_POLLING_TIMEOUT_IDLE.
1430  *
1431  * Return: always 0
1432  */
mei_me_polling_thread(void * _dev)1433 int mei_me_polling_thread(void *_dev)
1434 {
1435 	struct mei_device *dev = _dev;
1436 	irqreturn_t irq_ret;
1437 	long polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE;
1438 
1439 	dev_dbg(&dev->dev, "kernel thread is running\n");
1440 	while (!kthread_should_stop()) {
1441 		struct mei_me_hw *hw = to_me_hw(dev);
1442 		u32 hcsr;
1443 
1444 		wait_event_timeout(hw->wait_active,
1445 				   hw->is_active || kthread_should_stop(),
1446 				   msecs_to_jiffies(MEI_POLLING_TIMEOUT_IDLE));
1447 
1448 		if (kthread_should_stop())
1449 			break;
1450 
1451 		hcsr = mei_hcsr_read(dev);
1452 		if (me_intr_src(hcsr)) {
1453 			polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE;
1454 			irq_ret = mei_me_irq_thread_handler(1, dev);
1455 			if (irq_ret != IRQ_HANDLED)
1456 				dev_err(&dev->dev, "irq_ret %d\n", irq_ret);
1457 		} else {
1458 			/*
1459 			 * Increase timeout by MEI_POLLING_TIMEOUT_ACTIVE
1460 			 * up to MEI_POLLING_TIMEOUT_IDLE
1461 			 */
1462 			polling_timeout = clamp_val(polling_timeout + MEI_POLLING_TIMEOUT_ACTIVE,
1463 						    MEI_POLLING_TIMEOUT_ACTIVE,
1464 						    MEI_POLLING_TIMEOUT_IDLE);
1465 		}
1466 
1467 		schedule_timeout_interruptible(msecs_to_jiffies(polling_timeout));
1468 	}
1469 
1470 	return 0;
1471 }
1472 EXPORT_SYMBOL_GPL(mei_me_polling_thread);
1473 
/* me hardware callbacks plugged into the generic mei layer */
static const struct mei_hw_ops mei_me_hw_ops = {

	.trc_status = mei_me_trc_status,
	.fw_status = mei_me_fw_status,
	.pg_state  = mei_me_pg_state,

	.host_is_ready = mei_me_host_is_ready,

	.hw_is_ready = mei_me_hw_is_ready,
	.hw_reset = mei_me_hw_reset,
	.hw_config = mei_me_hw_config,
	.hw_start = mei_me_hw_start,

	.pg_in_transition = mei_me_pg_in_transition,
	.pg_is_enabled = mei_me_pg_is_enabled,

	.intr_clear = mei_me_intr_clear,
	.intr_enable = mei_me_intr_enable,
	.intr_disable = mei_me_intr_disable,
	.synchronize_irq = mei_me_synchronize_irq,

	.hbuf_free_slots = mei_me_hbuf_empty_slots,
	.hbuf_is_ready = mei_me_hbuf_is_empty,
	.hbuf_depth = mei_me_hbuf_depth,

	.write = mei_me_hbuf_write,

	.rdbuf_full_slots = mei_me_count_full_read_slots,
	.read_hdr = mei_me_mecbrw_read,
	.read = mei_me_read_slots
};
1505 
1506 /**
1507  * mei_me_fw_type_nm() - check for nm sku
1508  *
1509  * @pdev: pci device
1510  *
1511  * Read ME FW Status register to check for the Node Manager (NM) Firmware.
1512  * The NM FW is only signaled in PCI function 0.
1513  * __Note__: Deprecated by PCH8 and newer.
1514  *
1515  * Return: true in case of NM firmware
1516  */
mei_me_fw_type_nm(const struct pci_dev * pdev)1517 static bool mei_me_fw_type_nm(const struct pci_dev *pdev)
1518 {
1519 	u32 reg;
1520 	unsigned int devfn;
1521 	int ret;
1522 
1523 	devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
1524 	ret = pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_2, &reg);
1525 	trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg, ret);
1526 	/* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
1527 	return (reg & 0x600) == 0x200;
1528 }
1529 
1530 #define MEI_CFG_FW_NM                           \
1531 	.quirk_probe = mei_me_fw_type_nm
1532 
1533 /**
1534  * mei_me_fw_type_sps_4() - check for sps 4.0 sku
1535  *
1536  * @pdev: pci device
1537  *
1538  * Read ME FW Status register to check for SPS Firmware.
1539  * The SPS FW is only signaled in the PCI function 0.
1540  * __Note__: Deprecated by SPS 5.0 and newer.
1541  *
1542  * Return: true in case of SPS firmware
1543  */
mei_me_fw_type_sps_4(const struct pci_dev * pdev)1544 static bool mei_me_fw_type_sps_4(const struct pci_dev *pdev)
1545 {
1546 	u32 reg;
1547 	unsigned int devfn;
1548 	int ret;
1549 
1550 	devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
1551 	ret = pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, &reg);
1552 	trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg, ret);
1553 	return (reg & PCI_CFG_HFS_1_OPMODE_MSK) == PCI_CFG_HFS_1_OPMODE_SPS;
1554 }
1555 
1556 #define MEI_CFG_FW_SPS_4                          \
1557 	.quirk_probe = mei_me_fw_type_sps_4
1558 
1559 /**
1560  * mei_me_fw_type_sps_ign() - check for sps or ign sku
1561  *
1562  * @pdev: pci device
1563  *
1564  * Read ME FW Status register to check for SPS or IGN Firmware.
1565  * The SPS/IGN FW is only signaled in pci function 0
1566  *
1567  * Return: true in case of SPS/IGN firmware
1568  */
mei_me_fw_type_sps_ign(const struct pci_dev * pdev)1569 static bool mei_me_fw_type_sps_ign(const struct pci_dev *pdev)
1570 {
1571 	u32 reg;
1572 	u32 fw_type;
1573 	unsigned int devfn;
1574 	int ret;
1575 
1576 	devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
1577 	ret = pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_3, &reg);
1578 	trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_3", PCI_CFG_HFS_3, reg, ret);
1579 	fw_type = (reg & PCI_CFG_HFS_3_FW_SKU_MSK);
1580 
1581 	dev_dbg(&pdev->dev, "fw type is %d\n", fw_type);
1582 
1583 	return fw_type == PCI_CFG_HFS_3_FW_SKU_IGN ||
1584 	       fw_type == PCI_CFG_HFS_3_FW_SKU_SPS;
1585 }
1586 
/* device kind labels stored in struct mei_cfg */
#define MEI_CFG_KIND_ITOUCH                     \
	.kind = "itouch"

#define MEI_CFG_TYPE_GSC                        \
	.kind = "gsc"

#define MEI_CFG_TYPE_GSCFI                      \
	.kind = "gscfi"

#define MEI_CFG_FW_SPS_IGN                      \
	.quirk_probe = mei_me_fw_type_sps_ign

#define MEI_CFG_FW_VER_SUPP                     \
	.fw_ver_supported = 1

/* fw status register layouts, per platform generation */
#define MEI_CFG_ICH_HFS                      \
	.fw_status.count = 0

#define MEI_CFG_ICH10_HFS                        \
	.fw_status.count = 1,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1

#define MEI_CFG_PCH_HFS                         \
	.fw_status.count = 2,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1,   \
	.fw_status.status[1] = PCI_CFG_HFS_2

#define MEI_CFG_PCH8_HFS                        \
	.fw_status.count = 6,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1,   \
	.fw_status.status[1] = PCI_CFG_HFS_2,   \
	.fw_status.status[2] = PCI_CFG_HFS_3,   \
	.fw_status.status[3] = PCI_CFG_HFS_4,   \
	.fw_status.status[4] = PCI_CFG_HFS_5,   \
	.fw_status.status[5] = PCI_CFG_HFS_6

/* 128K host/device DMA rings plus one control page */
#define MEI_CFG_DMA_128 \
	.dma_size[DMA_DSCR_HOST] = SZ_128K, \
	.dma_size[DMA_DSCR_DEVICE] = SZ_128K, \
	.dma_size[DMA_DSCR_CTRL] = PAGE_SIZE

#define MEI_CFG_TRC \
	.hw_trc_supported = 1
1630 
/* ICH Legacy devices (no readable fw status registers) */
static const struct mei_cfg mei_me_ich_cfg = {
	MEI_CFG_ICH_HFS,
};

/* ICH devices */
static const struct mei_cfg mei_me_ich10_cfg = {
	MEI_CFG_ICH10_HFS,
};

/* PCH6 devices */
static const struct mei_cfg mei_me_pch6_cfg = {
	MEI_CFG_PCH_HFS,
};

/* PCH7 devices */
static const struct mei_cfg mei_me_pch7_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_VER_SUPP,
};

/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
static const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_FW_NM,
};

/* PCH8 Lynx Point and newer devices */
static const struct mei_cfg mei_me_pch8_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
};

/* PCH8 Lynx Point and newer devices - iTouch */
static const struct mei_cfg mei_me_pch8_itouch_cfg = {
	MEI_CFG_KIND_ITOUCH,
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
};

/* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
static const struct mei_cfg mei_me_pch8_sps_4_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_FW_SPS_4,
};

/* LBG with quirk for SPS (4.0) Firmware exclusion */
static const struct mei_cfg mei_me_pch12_sps_4_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_FW_SPS_4,
};

/* Cannon Lake and newer devices */
static const struct mei_cfg mei_me_pch12_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_DMA_128,
};

/* Cannon Lake with quirk for SPS 5.0 and newer Firmware exclusion */
static const struct mei_cfg mei_me_pch12_sps_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_DMA_128,
	MEI_CFG_FW_SPS_IGN,
};

/* Cannon Lake itouch with quirk for SPS 5.0 and newer Firmware exclusion
 * w/o DMA support.
 */
static const struct mei_cfg mei_me_pch12_itouch_sps_cfg = {
	MEI_CFG_KIND_ITOUCH,
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_FW_SPS_IGN,
};

/* Tiger Lake and newer devices */
static const struct mei_cfg mei_me_pch15_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_DMA_128,
	MEI_CFG_TRC,
};

/* Tiger Lake with quirk for SPS 5.0 and newer Firmware exclusion */
static const struct mei_cfg mei_me_pch15_sps_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_DMA_128,
	MEI_CFG_TRC,
	MEI_CFG_FW_SPS_IGN,
};

/* Graphics System Controller */
static const struct mei_cfg mei_me_gsc_cfg = {
	MEI_CFG_TYPE_GSC,
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
};

/* Graphics System Controller Firmware Interface */
static const struct mei_cfg mei_me_gscfi_cfg = {
	MEI_CFG_TYPE_GSCFI,
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
};

/* Chassis System Controller Firmware Interface (shares the gscfi kind) */
static const struct mei_cfg mei_me_csc_cfg = {
	MEI_CFG_TYPE_GSCFI,
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
};
1748 
1749 /*
1750  * mei_cfg_list - A list of platform platform specific configurations.
1751  * Note: has to be synchronized with  enum mei_cfg_idx.
1752  */
1753 static const struct mei_cfg *const mei_cfg_list[] = {
1754 	[MEI_ME_UNDEF_CFG] = NULL,
1755 	[MEI_ME_ICH_CFG] = &mei_me_ich_cfg,
1756 	[MEI_ME_ICH10_CFG] = &mei_me_ich10_cfg,
1757 	[MEI_ME_PCH6_CFG] = &mei_me_pch6_cfg,
1758 	[MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg,
1759 	[MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
1760 	[MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
1761 	[MEI_ME_PCH8_ITOUCH_CFG] = &mei_me_pch8_itouch_cfg,
1762 	[MEI_ME_PCH8_SPS_4_CFG] = &mei_me_pch8_sps_4_cfg,
1763 	[MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
1764 	[MEI_ME_PCH12_SPS_4_CFG] = &mei_me_pch12_sps_4_cfg,
1765 	[MEI_ME_PCH12_SPS_CFG] = &mei_me_pch12_sps_cfg,
1766 	[MEI_ME_PCH12_SPS_ITOUCH_CFG] = &mei_me_pch12_itouch_sps_cfg,
1767 	[MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg,
1768 	[MEI_ME_PCH15_SPS_CFG] = &mei_me_pch15_sps_cfg,
1769 	[MEI_ME_GSC_CFG] = &mei_me_gsc_cfg,
1770 	[MEI_ME_GSCFI_CFG] = &mei_me_gscfi_cfg,
1771 	[MEI_ME_CSC_CFG] = &mei_me_csc_cfg,
1772 };
1773 
mei_me_get_cfg(kernel_ulong_t idx)1774 const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
1775 {
1776 	BUILD_BUG_ON(ARRAY_SIZE(mei_cfg_list) != MEI_ME_NUM_CFG);
1777 
1778 	if (idx >= MEI_ME_NUM_CFG)
1779 		return NULL;
1780 
1781 	return mei_cfg_list[idx];
1782 }
1783 EXPORT_SYMBOL_GPL(mei_me_get_cfg);
1784 
1785 /**
1786  * mei_me_dev_init - allocates and initializes the mei device structure
1787  *
1788  * @parent: device associated with physical device (pci/platform)
1789  * @cfg: per device generation config
1790  * @slow_fw: configure longer timeouts as FW is slow
1791  *
1792  * Return: The mei_device pointer on success, NULL on failure.
1793  */
mei_me_dev_init(struct device * parent,const struct mei_cfg * cfg,bool slow_fw)1794 struct mei_device *mei_me_dev_init(struct device *parent,
1795 				   const struct mei_cfg *cfg, bool slow_fw)
1796 {
1797 	struct mei_device *dev;
1798 	struct mei_me_hw *hw;
1799 	int i;
1800 
1801 	dev = kzalloc(sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
1802 	if (!dev)
1803 		return NULL;
1804 
1805 	hw = to_me_hw(dev);
1806 
1807 	for (i = 0; i < DMA_DSCR_NUM; i++)
1808 		dev->dr_dscr[i].size = cfg->dma_size[i];
1809 
1810 	mei_device_init(dev, parent, slow_fw, &mei_me_hw_ops);
1811 	hw->cfg = cfg;
1812 
1813 	dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;
1814 
1815 	dev->kind = cfg->kind;
1816 
1817 	return dev;
1818 }
1819 EXPORT_SYMBOL_GPL(mei_me_dev_init);
1820