xref: /linux/drivers/platform/x86/intel/pmc/core.c (revision 1193e205dbb6feca917dc8e1862ffcdf2194234b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Intel Core SoC Power Management Controller Driver
4  *
5  * Copyright (c) 2016, Intel Corporation.
6  * All Rights Reserved.
7  *
8  * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
9  *          Vishwanath Somayaji <vishwanath.somayaji@intel.com>
10  */
11 
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 
14 #include <linux/bitfield.h>
15 #include <linux/debugfs.h>
16 #include <linux/delay.h>
17 #include <linux/dmi.h>
18 #include <linux/io.h>
19 #include <linux/module.h>
20 #include <linux/pci.h>
21 #include <linux/slab.h>
22 #include <linux/suspend.h>
23 #include <linux/units.h>
24 
25 #include <asm/cpuid/api.h>
26 #include <asm/cpu_device_id.h>
27 #include <asm/intel-family.h>
28 #include <asm/msr.h>
29 #include <asm/tsc.h>
30 
31 #include "core.h"
32 #include "ssram_telemetry.h"
33 #include "../pmt/telemetry.h"
34 
35 /* Maximum number of modes supported by platforms that have low power mode capability */
36 const char *pmc_lpm_modes[] = {
37 	"S0i2.0",
38 	"S0i2.1",
39 	"S0i2.2",
40 	"S0i3.0",
41 	"S0i3.1",
42 	"S0i3.2",
43 	"S0i3.3",
44 	"S0i3.4",
45 	NULL
46 };
47 
48 /* PKGC MSRs are common across Intel Core SoCs */
49 const struct pmc_bit_map msr_map[] = {
50 	{"Package C2",                  MSR_PKG_C2_RESIDENCY},
51 	{"Package C3",                  MSR_PKG_C3_RESIDENCY},
52 	{"Package C6",                  MSR_PKG_C6_RESIDENCY},
53 	{"Package C7",                  MSR_PKG_C7_RESIDENCY},
54 	{"Package C8",                  MSR_PKG_C8_RESIDENCY},
55 	{"Package C9",                  MSR_PKG_C9_RESIDENCY},
56 	{"Package C10",                 MSR_PKG_C10_RESIDENCY},
57 	{}
58 };
59 
60 static inline u32 pmc_core_reg_read(struct pmc *pmc, int reg_offset)
61 {
62 	return readl(pmc->regbase + reg_offset);
63 }
64 
65 static inline void pmc_core_reg_write(struct pmc *pmc, int reg_offset,
66 				      u32 val)
67 {
68 	writel(val, pmc->regbase + reg_offset);
69 }
70 
71 static inline u64 pmc_core_adjust_slp_s0_step(struct pmc *pmc, u32 value)
72 {
73 	/*
74 	 * ADL PCH does not have the SLP_S0 counter and LPM Residency counters are
75 	 * used as a workaround which uses 30.5 usec tick. All other client
76 	 * programs have the legacy SLP_S0 residency counter that is using the 122
77 	 * usec tick.
78 	 */
79 	const int lpm_adj_x2 = pmc->map->lpm_res_counter_step_x2;
80 
81 	if (pmc->map == &adl_reg_map)
82 		return (u64)value * GET_X2_COUNTER((u64)lpm_adj_x2);
83 	else
84 		return (u64)value * pmc->map->slp_s0_res_counter_step;
85 }
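/*
 * Editorial worked example (values assumed for illustration): with the
 * legacy 122 usec tick described above, a raw SLP_S0 count of 1000 reports
 * 1000 * 122 = 122000 usec. On ADL, lpm_res_counter_step_x2 encodes the
 * 30.5 usec tick as the step times two (61), and GET_X2_COUNTER() is
 * assumed to halve the doubled product to undo that encoding.
 */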
86 
87 static int set_etr3(struct pmc_dev *pmcdev)
88 {
89 	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
90 	const struct pmc_reg_map *map = pmc->map;
91 	u32 reg;
92 
93 	if (!map->etr3_offset)
94 		return -EOPNOTSUPP;
95 
96 	guard(mutex)(&pmcdev->lock);
97 
98 	/* check if CF9 is locked */
99 	reg = pmc_core_reg_read(pmc, map->etr3_offset);
100 	if (reg & ETR3_CF9LOCK)
101 		return -EACCES;
102 
103 	/* write CF9 global reset bit */
104 	reg |= ETR3_CF9GR;
105 	pmc_core_reg_write(pmc, map->etr3_offset, reg);
106 
107 	reg = pmc_core_reg_read(pmc, map->etr3_offset);
108 	if (!(reg & ETR3_CF9GR))
109 		return -EIO;
110 
111 	return 0;
112 }
113 static umode_t etr3_is_visible(struct kobject *kobj,
114 				struct attribute *attr,
115 				int idx)
116 {
117 	struct device *dev = kobj_to_dev(kobj);
118 	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
119 	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
120 	const struct pmc_reg_map *map = pmc->map;
121 	u32 reg;
122 
123 	scoped_guard(mutex, &pmcdev->lock)
124 		reg = pmc_core_reg_read(pmc, map->etr3_offset);
125 
126 	return reg & ETR3_CF9LOCK ? attr->mode & (SYSFS_PREALLOC | 0444) : attr->mode;
127 }
128 
129 static ssize_t etr3_show(struct device *dev,
130 				 struct device_attribute *attr, char *buf)
131 {
132 	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
133 	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
134 	const struct pmc_reg_map *map = pmc->map;
135 	u32 reg;
136 
137 	if (!map->etr3_offset)
138 		return -EOPNOTSUPP;
139 
140 	scoped_guard(mutex, &pmcdev->lock) {
141 		reg = pmc_core_reg_read(pmc, map->etr3_offset);
142 		reg &= ETR3_CF9GR | ETR3_CF9LOCK;
143 	}
144 
145 	return sysfs_emit(buf, "0x%08x", reg);
146 }
147 
148 static ssize_t etr3_store(struct device *dev,
149 				  struct device_attribute *attr,
150 				  const char *buf, size_t len)
151 {
152 	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
153 	int err;
154 	u32 reg;
155 
156 	err = kstrtouint(buf, 16, &reg);
157 	if (err)
158 		return err;
159 
160 	/* allow only CF9 writes */
161 	if (reg != ETR3_CF9GR)
162 		return -EINVAL;
163 
164 	err = set_etr3(pmcdev);
165 	if (err)
166 		return err;
167 
168 	return len;
169 }
170 static DEVICE_ATTR_RW(etr3);
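/*
 * Editorial usage sketch (hypothetical value): assuming ETR3_CF9GR is
 * BIT(20) as defined in core.h, writing "0x100000" to the device's etr3
 * sysfs attribute requests a CF9 global reset. Any other value is
 * rejected with -EINVAL, and set_etr3() fails with -EACCES when the CF9
 * lock bit is set.
 */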
171 
172 static struct attribute *pmc_attrs[] = {
173 	&dev_attr_etr3.attr,
174 	NULL
175 };
176 
177 static const struct attribute_group pmc_attr_group = {
178 	.attrs = pmc_attrs,
179 	.is_visible = etr3_is_visible,
180 };
181 
182 static const struct attribute_group *pmc_dev_groups[] = {
183 	&pmc_attr_group,
184 	NULL
185 };
186 
187 static int pmc_core_dev_state_get(void *data, u64 *val)
188 {
189 	struct pmc *pmc = data;
190 	const struct pmc_reg_map *map = pmc->map;
191 	u32 value;
192 
193 	value = pmc_core_reg_read(pmc, map->slp_s0_offset);
194 	*val = pmc_core_adjust_slp_s0_step(pmc, value);
195 
196 	return 0;
197 }
198 
199 DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_dev_state, pmc_core_dev_state_get, NULL, "%llu\n");
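/*
 * Editorial note: this attribute backs the debugfs file
 * /sys/kernel/debug/pmc_core/slp_s0_residency_usec (created in
 * pmc_core_dbgfs_register() below); the raw counter is scaled by
 * pmc_core_adjust_slp_s0_step(), so the reported value is already in
 * microseconds.
 */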
200 
201 static int pmc_core_pson_residency_get(void *data, u64 *val)
202 {
203 	struct pmc *pmc = data;
204 	const struct pmc_reg_map *map = pmc->map;
205 	u32 value;
206 
207 	value = pmc_core_reg_read(pmc, map->pson_residency_offset);
208 	*val = (u64)value * map->pson_residency_counter_step;
209 
210 	return 0;
211 }
212 
213 DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_pson_residency, pmc_core_pson_residency_get, NULL, "%llu\n");
214 
215 static int pmc_core_check_read_lock_bit(struct pmc *pmc)
216 {
217 	u32 value;
218 
219 	value = pmc_core_reg_read(pmc, pmc->map->pm_cfg_offset);
220 	return value & BIT(pmc->map->pm_read_disable_bit);
221 }
222 
223 static void pmc_core_slps0_display(struct pmc *pmc, struct device *dev,
224 				   struct seq_file *s)
225 {
226 	const struct pmc_bit_map **maps = pmc->map->slps0_dbg_maps;
227 	const struct pmc_bit_map *map;
228 	int offset = pmc->map->slps0_dbg_offset;
229 	u32 data;
230 
231 	while (*maps) {
232 		map = *maps;
233 		data = pmc_core_reg_read(pmc, offset);
234 		offset += 4;
235 		while (map->name) {
236 			if (dev)
237 				dev_info(dev, "SLP_S0_DBG: %-32s\tState: %s\n",
238 					map->name,
239 					data & map->bit_mask ? "Yes" : "No");
240 			if (s)
241 				seq_printf(s, "SLP_S0_DBG: %-32s\tState: %s\n",
242 					   map->name,
243 					   data & map->bit_mask ? "Yes" : "No");
244 			++map;
245 		}
246 		++maps;
247 	}
248 }
249 
250 static unsigned int pmc_core_lpm_get_arr_size(const struct pmc_bit_map **maps)
251 {
252 	unsigned int idx;
253 
254 	for (idx = 0; maps[idx]; idx++)
255 		;/* Nothing */
256 
257 	return idx;
258 }
259 
260 static void pmc_core_lpm_display(struct pmc *pmc, struct device *dev,
261 				 struct seq_file *s, u32 offset, int pmc_index,
262 				 const char *str,
263 				 const struct pmc_bit_map **maps)
264 {
265 	unsigned int index, idx, len = 32, arr_size;
266 	u32 bit_mask, *lpm_regs;
267 
268 	arr_size = pmc_core_lpm_get_arr_size(maps);
269 	lpm_regs = kmalloc_array(arr_size, sizeof(*lpm_regs), GFP_KERNEL);
270 	if (!lpm_regs)
271 		return;
272 
273 	for (index = 0; index < arr_size; index++) {
274 		lpm_regs[index] = pmc_core_reg_read(pmc, offset);
275 		offset += 4;
276 	}
277 
278 	for (idx = 0; idx < arr_size; idx++) {
279 		if (dev)
280 			dev_info(dev, "\nPMC%d:LPM_%s_%d:\t0x%x\n", pmc_index, str, idx,
281 				lpm_regs[idx]);
282 		if (s)
283 			seq_printf(s, "\nPMC%d:LPM_%s_%d:\t0x%x\n", pmc_index, str, idx,
284 				   lpm_regs[idx]);
285 		for (index = 0; maps[idx][index].name && index < len; index++) {
286 			bit_mask = maps[idx][index].bit_mask;
287 			if (dev)
288 				dev_info(dev, "PMC%d:%-30s %-30d\n", pmc_index,
289 					maps[idx][index].name,
290 					lpm_regs[idx] & bit_mask ? 1 : 0);
291 			if (s)
292 				seq_printf(s, "PMC%d:%-30s %-30d\n", pmc_index,
293 					   maps[idx][index].name,
294 					   lpm_regs[idx] & bit_mask ? 1 : 0);
295 		}
296 	}
297 
298 	kfree(lpm_regs);
299 }
300 
301 static bool slps0_dbg_latch;
302 
303 static inline u8 pmc_core_reg_read_byte(struct pmc *pmc, int offset)
304 {
305 	return readb(pmc->regbase + offset);
306 }
307 
308 static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip,
309 				 int pmc_index, u8 pf_reg, const struct pmc_bit_map **pf_map)
310 {
311 	seq_printf(s, "PMC%d:PCH IP: %-2d - %-32s\tState: %s\n",
312 		   pmc_index, ip, pf_map[idx][index].name,
313 		   pf_map[idx][index].bit_mask & pf_reg ? "Off" : "On");
314 }
315 
316 static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
317 {
318 	struct pmc_dev *pmcdev = s->private;
319 	unsigned int i;
320 
321 	for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
322 		struct pmc *pmc = pmcdev->pmcs[i];
323 		const struct pmc_bit_map **maps;
324 		u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES];
325 		unsigned int index, iter, idx, ip = 0;
326 
327 		if (!pmc)
328 			continue;
329 
330 		maps = pmc->map->pfear_sts;
331 		iter = pmc->map->ppfear0_offset;
332 
333 		for (index = 0; index < pmc->map->ppfear_buckets &&
334 		     index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++)
335 			pf_regs[index] = pmc_core_reg_read_byte(pmc, iter);
336 
337 		for (idx = 0; maps[idx]; idx++) {
338 			for (index = 0; maps[idx][index].name &&
339 			     index < pmc->map->ppfear_buckets * 8; ip++, index++)
340 				pmc_core_display_map(s, index, idx, ip, i,
341 						     pf_regs[index / 8], maps);
342 		}
343 	}
344 
345 	return 0;
346 }
347 DEFINE_SHOW_ATTRIBUTE(pmc_core_ppfear);
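/*
 * Editorial note: each PPFEAR bucket byte covers eight IPs, which is why
 * the display loop above indexes pf_regs[index / 8] while each map
 * entry's bit_mask selects the bit within that byte; a set bit is shown
 * as "Off" (power gated) by pmc_core_display_map().
 */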
348 
349 /* Return the MTPMC link status; 0 means ready */
350 static int pmc_core_mtpmc_link_status(struct pmc *pmc)
351 {
352 	u32 value;
353 
354 	value = pmc_core_reg_read(pmc, SPT_PMC_PM_STS_OFFSET);
355 	return value & BIT(SPT_PMC_MSG_FULL_STS_BIT);
356 }
357 
358 static int pmc_core_send_msg(struct pmc *pmc, u32 *addr_xram)
359 {
360 	u32 dest;
361 	int timeout;
362 
363 	for (timeout = NUM_RETRIES; timeout > 0; timeout--) {
364 		if (pmc_core_mtpmc_link_status(pmc) == 0)
365 			break;
366 		msleep(5);
367 	}
368 
369 	if (timeout <= 0 && pmc_core_mtpmc_link_status(pmc))
370 		return -EBUSY;
371 
372 	dest = (*addr_xram & MTPMC_MASK) | (1U << 1);
373 	pmc_core_reg_write(pmc, SPT_PMC_MTPMC_OFFSET, dest);
374 	return 0;
375 }
376 
377 static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
378 {
379 	struct pmc_dev *pmcdev = s->private;
380 	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
381 	const struct pmc_bit_map *map = pmc->map->mphy_sts;
382 	u32 mphy_core_reg_low, mphy_core_reg_high;
383 	u32 val_low, val_high;
384 	unsigned int index;
385 	int err = 0;
386 
387 	if (pmcdev->pmc_xram_read_bit) {
388 		seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
389 		return 0;
390 	}
391 
392 	mphy_core_reg_low  = (SPT_PMC_MPHY_CORE_STS_0 << 16);
393 	mphy_core_reg_high = (SPT_PMC_MPHY_CORE_STS_1 << 16);
394 
395 	guard(mutex)(&pmcdev->lock);
396 
397 	err = pmc_core_send_msg(pmc, &mphy_core_reg_low);
398 	if (err)
399 		return err;
400 
401 	msleep(10);
402 	val_low = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET);
403 
404 	err = pmc_core_send_msg(pmc, &mphy_core_reg_high);
405 	if (err)
406 		return err;
407 
408 	msleep(10);
409 	val_high = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET);
410 
411 	for (index = 0; index < 8 && map[index].name; index++) {
412 		seq_printf(s, "%-32s\tState: %s\n",
413 			   map[index].name,
414 			   map[index].bit_mask & val_low ? "Not power gated" :
415 			   "Power gated");
416 	}
417 
418 	for (index = 8; map[index].name; index++) {
419 		seq_printf(s, "%-32s\tState: %s\n",
420 			   map[index].name,
421 			   map[index].bit_mask & val_high ? "Not power gated" :
422 			   "Power gated");
423 	}
424 
425 	return 0;
426 }
427 DEFINE_SHOW_ATTRIBUTE(pmc_core_mphy_pg);
428 
429 static int pmc_core_pll_show(struct seq_file *s, void *unused)
430 {
431 	struct pmc_dev *pmcdev = s->private;
432 	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
433 	const struct pmc_bit_map *map = pmc->map->pll_sts;
434 	u32 mphy_common_reg, val;
435 	unsigned int index;
436 	int err = 0;
437 
438 	if (pmcdev->pmc_xram_read_bit) {
439 		seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
440 		return 0;
441 	}
442 
443 	mphy_common_reg  = (SPT_PMC_MPHY_COM_STS_0 << 16);
444 	guard(mutex)(&pmcdev->lock);
445 
446 	err = pmc_core_send_msg(pmc, &mphy_common_reg);
447 	if (err)
448 		return err;
449 
450 	/* Observed PMC HW response latency for MTPMC-MFPMC is ~10 ms */
451 	msleep(10);
452 	val = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET);
453 
454 	for (index = 0; map[index].name ; index++) {
455 		seq_printf(s, "%-32s\tState: %s\n",
456 			   map[index].name,
457 			   map[index].bit_mask & val ? "Active" : "Idle");
458 	}
459 
460 	return 0;
461 }
462 DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);
463 
464 int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore)
465 {
466 	struct pmc *pmc;
467 	const struct pmc_reg_map *map;
468 	u32 reg;
469 	unsigned int pmc_index;
470 	int ltr_index;
471 
472 	ltr_index = value;
473 	/* For platforms with multiple PMCs, the LTR index value given by the
474 	 * user is based on the contiguous indexes from ltr_show output.
475 	 * The PMC index and LTR index need to be calculated from it.
476 	 */
477 	for (pmc_index = 0; pmc_index < ARRAY_SIZE(pmcdev->pmcs) && ltr_index >= 0; pmc_index++) {
478 		pmc = pmcdev->pmcs[pmc_index];
479 
480 		if (!pmc)
481 			continue;
482 
483 		map = pmc->map;
484 		if (ltr_index <= map->ltr_ignore_max)
485 			break;
486 
487 		/* Along with IP names, ltr_show map includes CURRENT_PLATFORM
488 		 * and AGGREGATED_SYSTEM values per PMC. Take these two index
489 		 * values into account in the ltr_index calculation. Also, to start
490 		 * the ltr index from zero for the next PMC, subtract 1.
491 		 */
492 		ltr_index = ltr_index - (map->ltr_ignore_max + 2) - 1;
493 	}
494 
495 	if (pmc_index >= ARRAY_SIZE(pmcdev->pmcs) || ltr_index < 0)
496 		return -EINVAL;
497 
498 	pr_debug("ltr_ignore for pmc%d: ltr_index:%d\n", pmc_index, ltr_index);
499 
500 	guard(mutex)(&pmcdev->lock);
501 
502 	reg = pmc_core_reg_read(pmc, map->ltr_ignore_offset);
503 	if (ignore)
504 		reg |= BIT(ltr_index);
505 	else
506 		reg &= ~BIT(ltr_index);
507 	pmc_core_reg_write(pmc, map->ltr_ignore_offset, reg);
508 
509 	return 0;
510 }
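/*
 * Editorial worked example (an ltr_ignore_max of 22 assumed for
 * illustration): on a two-PMC platform, user indexes 0..22 address PMC0
 * directly. Because ltr_show also prints CURRENT_PLATFORM and
 * AGGREGATED_SYSTEM rows per PMC, user index 25 becomes
 * 25 - (22 + 2) - 1 = 0, i.e. the first LTR entry of the next PMC.
 */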
511 
512 static ssize_t pmc_core_ltr_write(struct pmc_dev *pmcdev,
513 				  const char __user *userbuf,
514 				  size_t count, int ignore)
515 {
516 	u32 value;
517 	int err;
518 
519 	err = kstrtou32_from_user(userbuf, count, 10, &value);
520 	if (err)
521 		return err;
522 
523 	err = pmc_core_send_ltr_ignore(pmcdev, value, ignore);
524 
525 	return err ?: count;
526 }
527 
528 static ssize_t pmc_core_ltr_ignore_write(struct file *file,
529 					 const char __user *userbuf,
530 					 size_t count, loff_t *ppos)
531 {
532 	struct seq_file *s = file->private_data;
533 	struct pmc_dev *pmcdev = s->private;
534 
535 	return pmc_core_ltr_write(pmcdev, userbuf, count, 1);
536 }
537 
538 static int pmc_core_ltr_ignore_show(struct seq_file *s, void *unused)
539 {
540 	return 0;
541 }
542 DEFINE_SHOW_STORE_ATTRIBUTE(pmc_core_ltr_ignore);
543 
544 static ssize_t pmc_core_ltr_restore_write(struct file *file,
545 					  const char __user *userbuf,
546 					  size_t count, loff_t *ppos)
547 {
548 	struct seq_file *s = file->private_data;
549 	struct pmc_dev *pmcdev = s->private;
550 
551 	return pmc_core_ltr_write(pmcdev, userbuf, count, 0);
552 }
553 
554 static int pmc_core_ltr_restore_show(struct seq_file *s, void *unused)
555 {
556 	return 0;
557 }
558 DEFINE_SHOW_STORE_ATTRIBUTE(pmc_core_ltr_restore);
559 
560 static void pmc_core_slps0_dbg_latch(struct pmc_dev *pmcdev, bool reset)
561 {
562 	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
563 	const struct pmc_reg_map *map = pmc->map;
564 	u32 fd;
565 
566 	guard(mutex)(&pmcdev->lock);
567 
568 	if (!reset && !slps0_dbg_latch)
569 		return;
570 
571 	fd = pmc_core_reg_read(pmc, map->slps0_dbg_offset);
572 	if (reset)
573 		fd &= ~CNP_PMC_LATCH_SLPS0_EVENTS;
574 	else
575 		fd |= CNP_PMC_LATCH_SLPS0_EVENTS;
576 	pmc_core_reg_write(pmc, map->slps0_dbg_offset, fd);
577 
578 	slps0_dbg_latch = false;
579 }
580 
581 static int pmc_core_slps0_dbg_show(struct seq_file *s, void *unused)
582 {
583 	struct pmc_dev *pmcdev = s->private;
584 
585 	pmc_core_slps0_dbg_latch(pmcdev, false);
586 	pmc_core_slps0_display(pmcdev->pmcs[PMC_IDX_MAIN], NULL, s);
587 	pmc_core_slps0_dbg_latch(pmcdev, true);
588 
589 	return 0;
590 }
591 DEFINE_SHOW_ATTRIBUTE(pmc_core_slps0_dbg);
592 
593 static u32 convert_ltr_scale(u32 val)
594 {
595 	/*
596 	 * As per PCIE specification supporting document
597 	 * ECN_LatencyTolnReporting_14Aug08.pdf the Latency
598 	 * Tolerance Reporting data payload is encoded in a
599 	 * 3 bit scale and 10 bit value fields. Values are
600 	 * multiplied by the indicated scale to yield an absolute time
601 	 * value, expressible in a range from 1 nanosecond to
602 	 * 2^25*(2^10-1) = 34,326,183,936 nanoseconds.
603 	 *
604 	 * scale encoding is as follows:
605 	 *
606 	 * ----------------------------------------------
607 	 * |scale factor	|	Multiplier (ns)	|
608 	 * ----------------------------------------------
609 	 * |	0		|	1		|
610 	 * |	1		|	32		|
611 	 * |	2		|	1024		|
612 	 * |	3		|	32768		|
613 	 * |	4		|	1048576		|
614 	 * |	5		|	33554432	|
615 	 * |	6		|	Invalid		|
616 	 * |	7		|	Invalid		|
617 	 * ----------------------------------------------
618 	 */
619 	if (val > 5) {
620 		pr_warn("Invalid LTR scale factor.\n");
621 		return 0;
622 	}
623 
624 	return 1U << (5 * val);
625 }
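/*
 * Editorial worked example: an LTR payload with scale field 2 and value
 * field 500 decodes to 500 * convert_ltr_scale(2) = 500 * 1024 =
 * 512000 ns.
 */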
626 
627 static int pmc_core_ltr_show(struct seq_file *s, void *unused)
628 {
629 	struct pmc_dev *pmcdev = s->private;
630 	u64 decoded_snoop_ltr, decoded_non_snoop_ltr, val;
631 	u32 ltr_raw_data, scale;
632 	u16 snoop_ltr, nonsnoop_ltr;
633 	unsigned int i, index, ltr_index = 0;
634 
635 	for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
636 		struct pmc *pmc;
637 		const struct pmc_bit_map *map;
638 		u32 ltr_ign_reg;
639 
640 		pmc = pmcdev->pmcs[i];
641 		if (!pmc)
642 			continue;
643 
644 		scoped_guard(mutex, &pmcdev->lock)
645 			ltr_ign_reg = pmc_core_reg_read(pmc, pmc->map->ltr_ignore_offset);
646 
647 		map = pmc->map->ltr_show_sts;
648 		for (index = 0; map[index].name; index++) {
649 			bool ltr_ign_data;
650 
651 			if (index > pmc->map->ltr_ignore_max)
652 				ltr_ign_data = false;
653 			else
654 				ltr_ign_data = ltr_ign_reg & BIT(index);
655 
656 			decoded_snoop_ltr = decoded_non_snoop_ltr = 0;
657 			ltr_raw_data = pmc_core_reg_read(pmc,
658 							 map[index].bit_mask);
659 			snoop_ltr = ltr_raw_data & ~MTPMC_MASK;
660 			nonsnoop_ltr = (ltr_raw_data >> 0x10) & ~MTPMC_MASK;
661 
662 			if (FIELD_GET(LTR_REQ_NONSNOOP, ltr_raw_data)) {
663 				scale = FIELD_GET(LTR_DECODED_SCALE, nonsnoop_ltr);
664 				val = FIELD_GET(LTR_DECODED_VAL, nonsnoop_ltr);
665 				decoded_non_snoop_ltr = val * convert_ltr_scale(scale);
666 			}
667 			if (FIELD_GET(LTR_REQ_SNOOP, ltr_raw_data)) {
668 				scale = FIELD_GET(LTR_DECODED_SCALE, snoop_ltr);
669 				val = FIELD_GET(LTR_DECODED_VAL, snoop_ltr);
670 				decoded_snoop_ltr = val * convert_ltr_scale(scale);
671 			}
672 
673 			seq_printf(s, "%d\tPMC%d:%-32s\tLTR: RAW: 0x%-16x\tNon-Snoop(ns): %-16llu\tSnoop(ns): %-16llu\tLTR_IGNORE: %d\n",
674 				   ltr_index, i, map[index].name, ltr_raw_data,
675 				   decoded_non_snoop_ltr,
676 				   decoded_snoop_ltr, ltr_ign_data);
677 			ltr_index++;
678 		}
679 	}
680 	return 0;
681 }
682 DEFINE_SHOW_ATTRIBUTE(pmc_core_ltr);
683 
684 static int pmc_core_s0ix_blocker_show(struct seq_file *s, void *unused)
685 {
686 	struct pmc_dev *pmcdev = s->private;
687 	unsigned int pmcidx;
688 
689 	for (pmcidx = 0; pmcidx < ARRAY_SIZE(pmcdev->pmcs); pmcidx++) {
690 		const struct pmc_bit_map **maps;
691 		unsigned int arr_size, r_idx;
692 		u32 offset, counter;
693 		struct pmc *pmc;
694 
695 		pmc = pmcdev->pmcs[pmcidx];
696 		if (!pmc)
697 			continue;
698 		maps = pmc->map->s0ix_blocker_maps;
699 		offset = pmc->map->s0ix_blocker_offset;
700 		arr_size = pmc_core_lpm_get_arr_size(maps);
701 
702 		for (r_idx = 0; r_idx < arr_size; r_idx++) {
703 			const struct pmc_bit_map *map;
704 
705 			for (map = maps[r_idx]; map->name; map++) {
706 				if (!map->blk)
707 					continue;
708 				counter = pmc_core_reg_read(pmc, offset);
709 				seq_printf(s, "PMC%d:%-30s %-30d\n", pmcidx,
710 					   map->name, counter);
711 				offset += map->blk * S0IX_BLK_SIZE;
712 			}
713 		}
714 	}
715 	return 0;
716 }
717 DEFINE_SHOW_ATTRIBUTE(pmc_core_s0ix_blocker);
718 
719 static void pmc_core_ltr_ignore_all(struct pmc_dev *pmcdev)
720 {
721 	unsigned int i;
722 
723 	for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); i++) {
724 		struct pmc *pmc;
725 		u32 ltr_ign;
726 
727 		pmc = pmcdev->pmcs[i];
728 		if (!pmc)
729 			continue;
730 
731 		guard(mutex)(&pmcdev->lock);
732 		pmc->ltr_ign = pmc_core_reg_read(pmc, pmc->map->ltr_ignore_offset);
733 
734 		/* ltr_ignore_max is the max index value for LTR ignore register */
735 		ltr_ign = pmc->ltr_ign | GENMASK(pmc->map->ltr_ignore_max, 0);
736 		pmc_core_reg_write(pmc, pmc->map->ltr_ignore_offset, ltr_ign);
737 	}
738 
739 	/*
740 	 * Ignoring the ME LTR during suspend blocks platforms with the ADL PCH
741 	 * from reaching a deeper S0ix substate, so restore (un-ignore) it here.
742 	 */
743 	pmc_core_send_ltr_ignore(pmcdev, 6, 0);
744 }
745 
746 static void pmc_core_ltr_restore_all(struct pmc_dev *pmcdev)
747 {
748 	unsigned int i;
749 
750 	for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); i++) {
751 		struct pmc *pmc;
752 
753 		pmc = pmcdev->pmcs[i];
754 		if (!pmc)
755 			continue;
756 
757 		guard(mutex)(&pmcdev->lock);
758 		pmc_core_reg_write(pmc, pmc->map->ltr_ignore_offset, pmc->ltr_ign);
759 	}
760 }
761 
762 static inline u64 adjust_lpm_residency(struct pmc *pmc, u32 offset,
763 				       const int lpm_adj_x2)
764 {
765 	u64 lpm_res = pmc_core_reg_read(pmc, offset);
766 
767 	return GET_X2_COUNTER((u64)lpm_adj_x2 * lpm_res);
768 }
769 
770 static int pmc_core_substate_res_show(struct seq_file *s, void *unused)
771 {
772 	struct pmc_dev *pmcdev = s->private;
773 	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
774 	const int lpm_adj_x2 = pmc->map->lpm_res_counter_step_x2;
775 	u32 offset = pmc->map->lpm_residency_offset;
776 	int mode;
777 
778 	seq_printf(s, "%-10s %-15s\n", "Substate", "Residency");
779 
780 	pmc_for_each_mode(mode, pmcdev) {
781 		seq_printf(s, "%-10s %-15llu\n", pmc_lpm_modes[mode],
782 			   adjust_lpm_residency(pmc, offset + (4 * mode), lpm_adj_x2));
783 	}
784 
785 	return 0;
786 }
787 DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_res);
788 
789 static int pmc_core_substate_sts_regs_show(struct seq_file *s, void *unused)
790 {
791 	struct pmc_dev *pmcdev = s->private;
792 	unsigned int i;
793 
794 	for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
795 		struct pmc *pmc = pmcdev->pmcs[i];
796 		const struct pmc_bit_map **maps;
797 		u32 offset;
798 
799 		if (!pmc)
800 			continue;
801 		maps = pmc->map->lpm_sts;
802 		offset = pmc->map->lpm_status_offset;
803 		pmc_core_lpm_display(pmc, NULL, s, offset, i, "STATUS", maps);
804 	}
805 
806 	return 0;
807 }
808 DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_sts_regs);
809 
810 static int pmc_core_substate_l_sts_regs_show(struct seq_file *s, void *unused)
811 {
812 	struct pmc_dev *pmcdev = s->private;
813 	unsigned int i;
814 
815 	for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
816 		struct pmc *pmc = pmcdev->pmcs[i];
817 		const struct pmc_bit_map **maps;
818 		u32 offset;
819 
820 		if (!pmc)
821 			continue;
822 		maps = pmc->map->lpm_sts;
823 		offset = pmc->map->lpm_live_status_offset;
824 		pmc_core_lpm_display(pmc, NULL, s, offset, i, "LIVE_STATUS", maps);
825 	}
826 
827 	return 0;
828 }
829 DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_l_sts_regs);
830 
831 static void pmc_core_substate_req_header_show(struct seq_file *s, int pmc_index)
832 {
833 	struct pmc_dev *pmcdev = s->private;
834 	int mode;
835 
836 	seq_printf(s, "%30s |", "Element");
837 	pmc_for_each_mode(mode, pmcdev)
838 		seq_printf(s, " %9s |", pmc_lpm_modes[mode]);
839 
840 	seq_printf(s, " %9s |", "Status");
841 	seq_printf(s, " %11s |\n", "Live Status");
842 }
843 
844 static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
845 {
846 	struct pmc_dev *pmcdev = s->private;
847 	u32 sts_offset;
848 	u32 sts_offset_live;
849 	u32 *lpm_req_regs;
850 	unsigned int mp, pmc_index;
851 	int num_maps;
852 
853 	for (pmc_index = 0; pmc_index < ARRAY_SIZE(pmcdev->pmcs); ++pmc_index) {
854 		struct pmc *pmc = pmcdev->pmcs[pmc_index];
855 		const struct pmc_bit_map **maps;
856 
857 		if (!pmc)
858 			continue;
859 
860 		maps = pmc->map->lpm_sts;
861 		num_maps = pmc->map->lpm_num_maps;
862 		sts_offset = pmc->map->lpm_status_offset;
863 		sts_offset_live = pmc->map->lpm_live_status_offset;
864 		lpm_req_regs = pmc->lpm_req_regs;
865 
866 		/*
867 		 * When there are multiple PMCs, though the PMC may exist, the
868 		 * requirement register discovery could have failed so check
869 		 * before accessing.
870 		 */
871 		if (!lpm_req_regs)
872 			continue;
873 
874 		/* Display the header */
875 		pmc_core_substate_req_header_show(s, pmc_index);
876 
877 		/* Loop over maps */
878 		for (mp = 0; mp < num_maps; mp++) {
879 			u32 req_mask = 0;
880 			u32 lpm_status;
881 			u32 lpm_status_live;
882 			const struct pmc_bit_map *map;
883 			int mode, i, len = 32;
884 
885 			/*
886 			 * Capture the requirements and create a mask so that we only
887 			 * show an element if it's required for at least one of the
888 			 * enabled low power modes
889 			 */
890 			pmc_for_each_mode(mode, pmcdev)
891 				req_mask |= lpm_req_regs[mp + (mode * num_maps)];
892 
893 			/* Get the last latched status for this map */
894 			lpm_status = pmc_core_reg_read(pmc, sts_offset + (mp * 4));
895 
896 			/* Get the runtime status for this map */
897 			lpm_status_live = pmc_core_reg_read(pmc, sts_offset_live + (mp * 4));
898 
899 			/*  Loop over elements in this map */
900 			map = maps[mp];
901 			for (i = 0; map[i].name && i < len; i++) {
902 				u32 bit_mask = map[i].bit_mask;
903 
904 				if (!(bit_mask & req_mask)) {
905 					/*
906 					 * Not required for any enabled states
907 					 * so don't display
908 					 */
909 					continue;
910 				}
911 
912 				/* Display the element name in the first column */
913 				seq_printf(s, "pmc%d: %26s |", pmc_index, map[i].name);
914 
915 				/* Loop over the enabled states and display if required */
916 				pmc_for_each_mode(mode, pmcdev) {
917 					bool required = lpm_req_regs[mp + (mode * num_maps)] &
918 							bit_mask;
919 					seq_printf(s, " %9s |", required ? "Required" : " ");
920 				}
921 
922 				/* In Status column, show the last captured state of this agent */
923 				seq_printf(s, " %9s |", lpm_status & bit_mask ? "Yes" : " ");
924 
925 				/* In Live status column, show the live state of this agent */
926 				seq_printf(s, " %11s |", lpm_status_live & bit_mask ? "Yes" : " ");
927 
928 				seq_puts(s, "\n");
929 			}
930 		}
931 	}
932 	return 0;
933 }
934 DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_req_regs);
935 
936 static unsigned int pmc_core_get_crystal_freq(void)
937 {
938 	unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
939 
940 	if (boot_cpu_data.cpuid_level < CPUID_LEAF_TSC)
941 		return 0;
942 
943 	eax_denominator = ebx_numerator = ecx_hz = edx = 0;
944 
945 	/* TSC/Crystal ratio, plus optionally Crystal Hz */
946 	cpuid(CPUID_LEAF_TSC, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);
947 
948 	if (ebx_numerator == 0 || eax_denominator == 0)
949 		return 0;
950 
951 	return ecx_hz;
952 }
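/*
 * Editorial note: CPUID leaf 0x15 reports the crystal clock in ECX on
 * parts that publish it, e.g. 38400000 for a 38.4 MHz crystal (an
 * illustrative value). When ECX reads 0 this helper returns 0 and
 * pmc_core_die_c6_us_show() below refuses to report die C6 residency.
 */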
953 
954 static int pmc_core_die_c6_us_show(struct seq_file *s, void *unused)
955 {
956 	struct pmc_dev *pmcdev = s->private;
957 	u64 die_c6_res, count;
958 	int ret;
959 
960 	if (!pmcdev->crystal_freq) {
961 		dev_warn_once(&pmcdev->pdev->dev, "Crystal frequency unavailable\n");
962 		return -ENXIO;
963 	}
964 
965 	ret = pmt_telem_read(pmcdev->punit_ep, pmcdev->die_c6_offset,
966 			     &count, 1);
967 	if (ret)
968 		return ret;
969 
970 	die_c6_res = div64_u64(count * HZ_PER_MHZ, pmcdev->crystal_freq);
971 	seq_printf(s, "%llu\n", die_c6_res);
972 
973 	return 0;
974 }
975 DEFINE_SHOW_ATTRIBUTE(pmc_core_die_c6_us);
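/*
 * Editorial worked example: the PUNIT counter ticks at the crystal
 * frequency, so ticks * HZ_PER_MHZ / crystal_freq yields microseconds;
 * for instance, 384000 ticks at 38400000 Hz is 10000 us.
 */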
976 
977 static int pmc_core_lpm_latch_mode_show(struct seq_file *s, void *unused)
978 {
979 	struct pmc_dev *pmcdev = s->private;
980 	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
981 	bool c10;
982 	u32 reg;
983 	int mode;
984 
985 	reg = pmc_core_reg_read(pmc, pmc->map->lpm_sts_latch_en_offset);
986 	if (reg & LPM_STS_LATCH_MODE) {
987 		seq_puts(s, "c10");
988 		c10 = false;
989 	} else {
990 		seq_puts(s, "[c10]");
991 		c10 = true;
992 	}
993 
994 	pmc_for_each_mode(mode, pmcdev) {
995 		if ((BIT(mode) & reg) && !c10)
996 			seq_printf(s, " [%s]", pmc_lpm_modes[mode]);
997 		else
998 			seq_printf(s, " %s", pmc_lpm_modes[mode]);
999 	}
1000 
1001 	seq_puts(s, " clear\n");
1002 
1003 	return 0;
1004 }
1005 
1006 static ssize_t pmc_core_lpm_latch_mode_write(struct file *file,
1007 					     const char __user *userbuf,
1008 					     size_t count, loff_t *ppos)
1009 {
1010 	struct seq_file *s = file->private_data;
1011 	struct pmc_dev *pmcdev = s->private;
1012 	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
1013 	bool clear = false, c10 = false;
1014 	unsigned char buf[8];
1015 	int m, mode;
1016 	u32 reg;
1017 
1018 	if (count > sizeof(buf) - 1)
1019 		return -EINVAL;
1020 	if (copy_from_user(buf, userbuf, count))
1021 		return -EFAULT;
1022 	buf[count] = '\0';
1023 
1024 	/*
1025 	 * Allowed strings are:
1026 	 *	Any enabled substate, e.g. 'S0i2.0'
1027 	 *	'c10'
1028 	 *	'clear'
1029 	 */
1030 	mode = sysfs_match_string(pmc_lpm_modes, buf);
1031 
1032 	/* Check string matches enabled mode */
1033 	pmc_for_each_mode(m, pmcdev)
1034 		if (mode == m)
1035 			break;
1036 
1037 	if (mode != m || mode < 0) {
1038 		if (sysfs_streq(buf, "clear"))
1039 			clear = true;
1040 		else if (sysfs_streq(buf, "c10"))
1041 			c10 = true;
1042 		else
1043 			return -EINVAL;
1044 	}
1045 
1046 	if (clear) {
1047 		guard(mutex)(&pmcdev->lock);
1048 
1049 		reg = pmc_core_reg_read(pmc, pmc->map->etr3_offset);
1050 		reg |= ETR3_CLEAR_LPM_EVENTS;
1051 		pmc_core_reg_write(pmc, pmc->map->etr3_offset, reg);
1052 
1053 		return count;
1054 	}
1055 
1056 	if (c10) {
1057 		guard(mutex)(&pmcdev->lock);
1058 
1059 		reg = pmc_core_reg_read(pmc, pmc->map->lpm_sts_latch_en_offset);
1060 		reg &= ~LPM_STS_LATCH_MODE;
1061 		pmc_core_reg_write(pmc, pmc->map->lpm_sts_latch_en_offset, reg);
1062 
1063 		return count;
1064 	}
1065 
1066 	/*
1067 	 * For LPM mode latching we set the latch enable bit and selected mode
1068 	 * and clear everything else.
1069 	 */
1070 	reg = LPM_STS_LATCH_MODE | BIT(mode);
1071 	guard(mutex)(&pmcdev->lock);
1072 	pmc_core_reg_write(pmc, pmc->map->lpm_sts_latch_en_offset, reg);
1073 
1074 	return count;
1075 }
1076 DEFINE_PMC_CORE_ATTR_WRITE(pmc_core_lpm_latch_mode);
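/*
 * Editorial usage sketch: the writer above accepts any enabled substate
 * name, "c10", or "clear"; for example
 *   echo S0i3.2 > /sys/kernel/debug/pmc_core/lpm_latch_mode
 * latches the LPM status registers on S0i3.2 entry instead of on C10.
 */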
1077 
1078 static int pmc_core_pkgc_show(struct seq_file *s, void *unused)
1079 {
1080 	struct pmc *pmc = s->private;
1081 	const struct pmc_bit_map *map = pmc->map->msr_sts;
1082 	u64 pcstate_count;
1083 	unsigned int index;
1084 
1085 	for (index = 0; map[index].name ; index++) {
1086 		if (rdmsrq_safe(map[index].bit_mask, &pcstate_count))
1087 			continue;
1088 
1089 		pcstate_count *= 1000;
1090 		do_div(pcstate_count, tsc_khz);
1091 		seq_printf(s, "%-8s : %llu\n", map[index].name,
1092 			   pcstate_count);
1093 	}
1094 
1095 	return 0;
1096 }
1097 DEFINE_SHOW_ATTRIBUTE(pmc_core_pkgc);
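/*
 * Editorial worked example (assuming the residency MSRs tick at TSC
 * frequency, as the conversion above implies): count * 1000 / tsc_khz
 * yields microseconds, e.g. a count of 2400000 with tsc_khz = 2400000
 * (2.4 GHz) prints 1000, i.e. 1 ms of residency.
 */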
1098 
1099 static bool pmc_core_pri_verify(u32 lpm_pri, u8 *mode_order)
1100 {
1101 	unsigned int i, j;
1102 
1103 	if (!lpm_pri)
1104 		return false;
1105 	/*
1106 	 * Each byte contains the priority level for 2 modes (7:4 and 3:0).
1107 	 * In a 32 bit register this allows for describing 8 modes. Store the
1108 	 * levels and look for values out of range.
1109 	 */
1110 	for (i = 0; i < 8; i++) {
1111 		int level = lpm_pri & GENMASK(3, 0);
1112 
1113 		if (level >= LPM_MAX_NUM_MODES)
1114 			return false;
1115 
1116 		mode_order[i] = level;
1117 		lpm_pri >>= 4;
1118 	}
1119 
1120 	/* Check that we have unique values */
1121 	for (i = 0; i < LPM_MAX_NUM_MODES - 1; i++)
1122 		for (j = i + 1; j < LPM_MAX_NUM_MODES; j++)
1123 			if (mode_order[i] == mode_order[j])
1124 				return false;
1125 
1126 	return true;
1127 }
1128 
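/*
 * Editorial worked example: lpm_pri = 0x76543210 stores priority level i
 * in nibble i, so pmc_core_pri_verify() yields mode_order = {0,1,...,7}
 * and pri_order below stays the identity map; the final loop then walks
 * priorities from lowest (p = 7) down and records each mode whose bit is
 * set in lpm_en.
 */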
1129 void pmc_core_get_low_power_modes(struct pmc_dev *pmcdev)
1130 {
1131 	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
1132 	u8 pri_order[LPM_MAX_NUM_MODES] = LPM_DEFAULT_PRI;
1133 	u8 mode_order[LPM_MAX_NUM_MODES];
1134 	u32 lpm_pri;
1135 	u32 lpm_en;
1136 	unsigned int i;
1137 	int mode, p;
1138 
1139 	/* Use LPM Maps to indicate support for substates */
1140 	if (!pmc->map->lpm_num_maps)
1141 		return;
1142 
1143 	lpm_en = pmc_core_reg_read(pmc, pmc->map->lpm_en_offset);
1144 	/* For MTL, BIT 31 is not an lpm mode but an enable bit.
1145 	 * Lower byte is enough to cover the number of lpm modes for all
1146 	 * platforms and hence mask the upper 3 bytes.
1147 	 */
1148 	pmcdev->num_lpm_modes = hweight32(lpm_en & 0xFF);
1149 
1150 	/* Read 32 bit LPM_PRI register */
1151 	lpm_pri = pmc_core_reg_read(pmc, pmc->map->lpm_priority_offset);
1152 
1153 
1154 	/*
1155 	 * If lpm_pri value passes verification, then override the default
1156 	 * modes here. Otherwise stick with the default.
1157 	 */
1158 	if (pmc_core_pri_verify(lpm_pri, mode_order))
1159 		/* Get list of modes in priority order */
1160 		for (mode = 0; mode < LPM_MAX_NUM_MODES; mode++)
1161 			pri_order[mode_order[mode]] = mode;
1162 	else
1163 		dev_warn(&pmcdev->pdev->dev,
1164 			 "Assuming a default substate order for this platform\n");
1165 
1166 	/*
1167 	 * Loop through all modes from lowest to highest priority,
1168 	 * and capture all enabled modes in order
1169 	 */
1170 	i = 0;
1171 	for (p = LPM_MAX_NUM_MODES - 1; p >= 0; p--) {
1172 		int mode = pri_order[p];
1173 
1174 		if (!(BIT(mode) & lpm_en))
1175 			continue;
1176 
1177 		pmcdev->lpm_en_modes[i++] = mode;
1178 	}
1179 }
1180 
1181 int get_primary_reg_base(struct pmc *pmc)
1182 {
1183 	u64 slp_s0_addr;
1184 
1185 	if (lpit_read_residency_count_address(&slp_s0_addr)) {
1186 		pmc->base_addr = PMC_BASE_ADDR_DEFAULT;
1187 
1188 		if (page_is_ram(PHYS_PFN(pmc->base_addr)))
1189 			return -ENODEV;
1190 	} else {
1191 		pmc->base_addr = slp_s0_addr - pmc->map->slp_s0_offset;
1192 	}
1193 
1194 	pmc->regbase = ioremap(pmc->base_addr, pmc->map->regmap_length);
1195 	if (!pmc->regbase)
1196 		return -ENOMEM;
1197 	return 0;
1198 }
1199 
1200 void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 guid)
1201 {
1202 	struct telem_endpoint *ep;
1203 	struct pci_dev *pcidev;
1204 
1205 	pcidev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(10, 0));
1206 	if (!pcidev) {
1207 		dev_err(&pmcdev->pdev->dev, "PUNIT PMT device not found.");
1208 		return;
1209 	}
1210 
1211 	ep = pmt_telem_find_and_register_endpoint(pcidev, guid, 0);
1212 	pci_dev_put(pcidev);
1213 	if (IS_ERR(ep)) {
1214 		dev_err(&pmcdev->pdev->dev,
1215 			"pmc_core: couldn't get DMU telem endpoint %ld",
1216 			PTR_ERR(ep));
1217 		return;
1218 	}
1219 
1220 	pmcdev->punit_ep = ep;
1221 
1222 	pmcdev->has_die_c6 = true;
1223 	pmcdev->die_c6_offset = MTL_PMT_DMU_DIE_C6_OFFSET;
1224 }
1225 
1226 void pmc_core_set_device_d3(unsigned int device)
1227 {
1228 	struct pci_dev *pcidev;
1229 
1230 	pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
1231 	if (pcidev) {
1232 		if (!device_trylock(&pcidev->dev)) {
1233 			pci_dev_put(pcidev);
1234 			return;
1235 		}
1236 		if (!pcidev->dev.driver) {
1237 			dev_info(&pcidev->dev, "Setting to D3hot\n");
1238 			pci_set_power_state(pcidev, PCI_D3hot);
1239 		}
1240 		device_unlock(&pcidev->dev);
1241 		pci_dev_put(pcidev);
1242 	}
1243 }
1244 
1245 static bool pmc_core_is_pson_residency_enabled(struct pmc_dev *pmcdev)
1246 {
1247 	struct platform_device *pdev = pmcdev->pdev;
1248 	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
1249 	u8 val;
1250 
1251 	if (!adev)
1252 		return false;
1253 
1254 	if (fwnode_property_read_u8(acpi_fwnode_handle(adev),
1255 				    "intel-cec-pson-switching-enabled-in-s0",
1256 				    &val))
1257 		return false;
1258 
1259 	return val == 1;
1260 }
1261 
1262 static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
1263 {
1264 	debugfs_remove_recursive(pmcdev->dbgfs_dir);
1265 }
1266 
1267 static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
1268 {
1269 	struct pmc *primary_pmc = pmcdev->pmcs[PMC_IDX_MAIN];
1270 	struct dentry *dir;
1271 
1272 	dir = debugfs_create_dir("pmc_core", NULL);
1273 	pmcdev->dbgfs_dir = dir;
1274 
1275 	debugfs_create_file("slp_s0_residency_usec", 0444, dir, primary_pmc,
1276 			    &pmc_core_dev_state);
1277 
1278 	if (primary_pmc->map->pfear_sts)
1279 		debugfs_create_file("pch_ip_power_gating_status", 0444, dir,
1280 				    pmcdev, &pmc_core_ppfear_fops);
1281 
1282 	debugfs_create_file("ltr_ignore", 0644, dir, pmcdev,
1283 			    &pmc_core_ltr_ignore_fops);
1284 
1285 	debugfs_create_file("ltr_restore", 0200, dir, pmcdev, &pmc_core_ltr_restore_fops);
1286 
1287 	debugfs_create_file("ltr_show", 0444, dir, pmcdev, &pmc_core_ltr_fops);
1288 
1289 	if (primary_pmc->map->s0ix_blocker_maps)
1290 		debugfs_create_file("s0ix_blocker", 0444, dir, pmcdev, &pmc_core_s0ix_blocker_fops);
1291 
1292 	debugfs_create_file("package_cstate_show", 0444, dir, primary_pmc,
1293 			    &pmc_core_pkgc_fops);
1294 
1295 	if (primary_pmc->map->pll_sts)
1296 		debugfs_create_file("pll_status", 0444, dir, pmcdev,
1297 				    &pmc_core_pll_fops);
1298 
1299 	if (primary_pmc->map->mphy_sts)
1300 		debugfs_create_file("mphy_core_lanes_power_gating_status",
1301 				    0444, dir, pmcdev,
1302 				    &pmc_core_mphy_pg_fops);
1303 
1304 	if (primary_pmc->map->slps0_dbg_maps) {
1305 		debugfs_create_file("slp_s0_debug_status", 0444,
1306 				    dir, pmcdev,
1307 				    &pmc_core_slps0_dbg_fops);
1308 
1309 		debugfs_create_bool("slp_s0_dbg_latch", 0644,
1310 				    dir, &slps0_dbg_latch);
1311 	}
1312 
1313 	if (primary_pmc->map->lpm_en_offset) {
1314 		debugfs_create_file("substate_residencies", 0444,
1315 				    pmcdev->dbgfs_dir, pmcdev,
1316 				    &pmc_core_substate_res_fops);
1317 	}
1318 
1319 	if (primary_pmc->map->lpm_status_offset) {
1320 		debugfs_create_file("substate_status_registers", 0444,
1321 				    pmcdev->dbgfs_dir, pmcdev,
1322 				    &pmc_core_substate_sts_regs_fops);
1323 		debugfs_create_file("substate_live_status_registers", 0444,
1324 				    pmcdev->dbgfs_dir, pmcdev,
1325 				    &pmc_core_substate_l_sts_regs_fops);
1326 		debugfs_create_file("lpm_latch_mode", 0644,
1327 				    pmcdev->dbgfs_dir, pmcdev,
1328 				    &pmc_core_lpm_latch_mode_fops);
1329 	}
1330 
1331 	if (primary_pmc->lpm_req_regs) {
1332 		debugfs_create_file("substate_requirements", 0444,
1333 				    pmcdev->dbgfs_dir, pmcdev,
1334 				    &pmc_core_substate_req_regs_fops);
1335 	}
1336 
1337 	if (primary_pmc->map->pson_residency_offset && pmc_core_is_pson_residency_enabled(pmcdev)) {
1338 		debugfs_create_file("pson_residency_usec", 0444,
1339 				    pmcdev->dbgfs_dir, primary_pmc, &pmc_core_pson_residency);
1340 	}
1341 
1342 	if (pmcdev->has_die_c6) {
1343 		debugfs_create_file("die_c6_us_show", 0444,
1344 				    pmcdev->dbgfs_dir, pmcdev,
1345 				    &pmc_core_die_c6_us_fops);
1346 	}
1347 }
1348 
1349 static u32 pmc_core_find_guid(struct pmc_info *list, const struct pmc_reg_map *map)
1350 {
1351 	for (; list->map; ++list)
1352 		if (list->map == map)
1353 			return list->guid;
1354 
1355 	return 0;
1356 }
1357 
1358 /*
1359  * This function retrieves low power mode requirement data from PMC Low
1360  * Power Mode (LPM) table.
1361  *
1362  * In telemetry space, the LPM table contains a 4 byte header followed
1363  * by 8 consecutive mode blocks (one for each LPM mode). Each block
1364  * has a 4 byte header followed by a set of registers that describe the
1365  * IP state requirements for the given mode. The IP mapping is platform
1366  * specific but the same for each block, making for easy analysis.
1367  * Platforms only use a subset of the space to track the requirements
1368  * for their IPs. Callers provide the requirement registers they use as
1369  * a list of indices. Each requirement register is associated with an
1370  * IP map that's maintained by the caller.
1371  *
1372  * Header
1373  * +----+----------------------------+----------------------------+
1374  * |  0 |      REVISION              |      ENABLED MODES         |
1375  * +----+--------------+-------------+-------------+--------------+
1376  *
1377  * Low Power Mode 0 Block
1378  * +----+--------------+-------------+-------------+--------------+
1379  * |  1 |     SUB ID   |     SIZE    |   MAJOR     |   MINOR      |
1380  * +----+--------------+-------------+-------------+--------------+
1381  * |  2 |           LPM0 Requirements 0                           |
1382  * +----+---------------------------------------------------------+
1383  * |    |                  ...                                    |
1384  * +----+---------------------------------------------------------+
1385  * | 29 |           LPM0 Requirements 27                          |
1386  * +----+---------------------------------------------------------+
1387  *
1388  * ...
1389  *
1390  * Low Power Mode 7 Block
1391  * +----+--------------+-------------+-------------+--------------+
1392  * |    |     SUB ID   |     SIZE    |   MAJOR     |   MINOR      |
1393  * +----+--------------+-------------+-------------+--------------+
1394  * | 60 |           LPM7 Requirements 0                           |
1395  * +----+---------------------------------------------------------+
1396  * |    |                  ...                                    |
1397  * +----+---------------------------------------------------------+
1398  * | 87 |           LPM7 Requirements 27                          |
1399  * +----+---------------------------------------------------------+
1400  *
1401  */
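/*
 * Editorial note: with the layout above, the telemetry sample id read
 * for requirement register m of the n-th enabled mode (n starting at 0)
 * works out to lpm_indices[m] + LPM_HEADER_OFFSET + LPM_MODE_OFFSET +
 * n * (LPM_REG_COUNT + LPM_MODE_OFFSET), which is exactly how
 * mode_offset advances in pmc_core_get_lpm_req() below.
 */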
1402 static int pmc_core_get_lpm_req(struct pmc_dev *pmcdev, struct pmc *pmc, struct pci_dev *pcidev)
1403 {
1404 	struct telem_endpoint *ep;
1405 	const u8 *lpm_indices;
1406 	int num_maps, mode_offset = 0;
1407 	int ret, mode;
1408 	int lpm_size;
1409 	u32 guid;
1410 
1411 	lpm_indices = pmc->map->lpm_reg_index;
1412 	num_maps = pmc->map->lpm_num_maps;
1413 	lpm_size = LPM_MAX_NUM_MODES * num_maps;
1414 
1415 	guid = pmc_core_find_guid(pmcdev->regmap_list, pmc->map);
1416 	if (!guid)
1417 		return -ENXIO;
1418 
1419 	ep = pmt_telem_find_and_register_endpoint(pcidev, guid, 0);
1420 	if (IS_ERR(ep)) {
1421 		dev_dbg(&pmcdev->pdev->dev, "couldn't get telem endpoint %pe", ep);
1422 		return -EPROBE_DEFER;
1423 	}
1424 
1425 	pmc->lpm_req_regs = devm_kzalloc(&pmcdev->pdev->dev,
1426 					 lpm_size * sizeof(u32),
1427 					 GFP_KERNEL);
1428 	if (!pmc->lpm_req_regs) {
1429 		ret = -ENOMEM;
1430 		goto unregister_ep;
1431 	}
1432 
1433 	mode_offset = LPM_HEADER_OFFSET + LPM_MODE_OFFSET;
1434 	pmc_for_each_mode(mode, pmcdev) {
1435 		u32 *req_offset = pmc->lpm_req_regs + (mode * num_maps);
1436 		int m;
1437 
1438 		for (m = 0; m < num_maps; m++) {
1439 			u8 sample_id = lpm_indices[m] + mode_offset;
1440 
1441 			ret = pmt_telem_read32(ep, sample_id, req_offset, 1);
1442 			if (ret) {
1443 				dev_err(&pmcdev->pdev->dev,
1444 					"couldn't read Low Power Mode requirements: %d\n", ret);
1445 				goto unregister_ep;
1446 			}
1447 			++req_offset;
1448 		}
1449 		mode_offset += LPM_REG_COUNT + LPM_MODE_OFFSET;
1450 	}
1451 
1452 unregister_ep:
1453 	pmt_telem_unregister_endpoint(ep);
1454 
1455 	return ret;
1456 }
1457 
1458 static int pmc_core_ssram_get_lpm_reqs(struct pmc_dev *pmcdev, int func)
1459 {
1460 	struct pci_dev *pcidev __free(pci_dev_put) = NULL;
1461 	unsigned int i;
1462 	int ret;
1463 
1464 	pcidev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(20, func));
1465 	if (!pcidev)
1466 		return -ENODEV;
1467 
1468 	for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
1469 		if (!pmcdev->pmcs[i])
1470 			continue;
1471 
1472 		ret = pmc_core_get_lpm_req(pmcdev, pmcdev->pmcs[i], pcidev);
1473 		if (ret)
1474 			return ret;
1475 	}
1476 
1477 	return 0;
1478 }
1479 
1480 static const struct pmc_reg_map *pmc_core_find_regmap(struct pmc_info *list, u16 devid)
1481 {
1482 	for (; list->map; ++list)
1483 		if (devid == list->devid)
1484 			return list->map;
1485 
1486 	return NULL;
1487 }
1488 
1489 static int pmc_core_pmc_add(struct pmc_dev *pmcdev, unsigned int pmc_index)
1490 
1491 {
1492 	struct pmc_ssram_telemetry pmc_ssram_telemetry;
1493 	const struct pmc_reg_map *map;
1494 	struct pmc *pmc;
1495 	int ret;
1496 
1497 	ret = pmc_ssram_telemetry_get_pmc_info(pmc_index, &pmc_ssram_telemetry);
1498 	if (ret)
1499 		return ret;
1500 
1501 	map = pmc_core_find_regmap(pmcdev->regmap_list, pmc_ssram_telemetry.devid);
1502 	if (!map)
1503 		return -ENODEV;
1504 
1505 	pmc = pmcdev->pmcs[pmc_index];
1506 	/* Memory for the primary PMC has already been allocated during probe */
1507 	if (!pmc) {
1508 		pmc = devm_kzalloc(&pmcdev->pdev->dev, sizeof(*pmc), GFP_KERNEL);
1509 		if (!pmc)
1510 			return -ENOMEM;
1511 	}
1512 
1513 	pmc->map = map;
1514 	pmc->base_addr = pmc_ssram_telemetry.base_addr;
1515 	pmc->regbase = ioremap(pmc->base_addr, pmc->map->regmap_length);
1516 
1517 	if (!pmc->regbase) {
1518 		devm_kfree(&pmcdev->pdev->dev, pmc);
1519 		return -ENOMEM;
1520 	}
1521 
1522 	pmcdev->pmcs[pmc_index] = pmc;
1523 
1524 	return 0;
1525 }
1526 
1527 static int pmc_core_ssram_get_reg_base(struct pmc_dev *pmcdev)
1528 {
1529 	int ret;
1530 
1531 	ret = pmc_core_pmc_add(pmcdev, PMC_IDX_MAIN);
1532 	if (ret)
1533 		return ret;
1534 
1535 	pmc_core_pmc_add(pmcdev, PMC_IDX_IOE);
1536 	pmc_core_pmc_add(pmcdev, PMC_IDX_PCH);
1537 
1538 	return 0;
1539 }
1540 
1541 /*
1542  * When supported, SSRAM init is used to discover all available PMCs.
1543  * If SSRAM init fails, this function falls back to the legacy method to
1544  * at least get the primary PMC.
1545  */
1546 int generic_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
1547 {
1548 	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
1549 	bool ssram;
1550 	int ret;
1551 
1552 	pmcdev->suspend = pmc_dev_info->suspend;
1553 	pmcdev->resume = pmc_dev_info->resume;
1554 
1555 	ssram = pmc_dev_info->regmap_list != NULL;
1556 	if (ssram) {
1557 		pmcdev->regmap_list = pmc_dev_info->regmap_list;
1558 		ret = pmc_core_ssram_get_reg_base(pmcdev);
1559 		/*
1560 		 * EAGAIN error code indicates Intel PMC SSRAM Telemetry driver
1561 		 * has not finished probe and PMC info is not available yet. Try
1562 		 * again later.
1563 		 */
1564 		if (ret == -EAGAIN)
1565 			return -EPROBE_DEFER;
1566 
1567 		if (ret) {
1568 			dev_warn(&pmcdev->pdev->dev,
1569 				 "Failed to get PMC info from SSRAM, %d, using legacy init\n", ret);
1570 			ssram = false;
1571 		}
1572 	}
1573 
1574 	if (!ssram) {
1575 		pmc->map = pmc_dev_info->map;
1576 		ret = get_primary_reg_base(pmc);
1577 		if (ret)
1578 			return ret;
1579 	}
1580 
1581 	pmc_core_get_low_power_modes(pmcdev);
1582 	if (pmc_dev_info->dmu_guid)
1583 		pmc_core_punit_pmt_init(pmcdev, pmc_dev_info->dmu_guid);
1584 
1585 	if (ssram) {
1586 		ret = pmc_core_ssram_get_lpm_reqs(pmcdev, pmc_dev_info->pci_func);
1587 		if (ret)
1588 			goto unmap_regbase;
1589 	}
1590 
1591 	return 0;
1592 
1593 unmap_regbase:
1594 	for (unsigned int i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
1595 		struct pmc *pmc = pmcdev->pmcs[i];
1596 
1597 		if (pmc && pmc->regbase)
1598 			iounmap(pmc->regbase);
1599 	}
1600 
1601 	if (pmcdev->punit_ep)
1602 		pmt_telem_unregister_endpoint(pmcdev->punit_ep);
1603 
1604 	return ret;
1605 }
1606 
1607 static const struct x86_cpu_id intel_pmc_core_ids[] = {
1608 	X86_MATCH_VFM(INTEL_SKYLAKE_L,		&spt_pmc_dev),
1609 	X86_MATCH_VFM(INTEL_SKYLAKE,		&spt_pmc_dev),
1610 	X86_MATCH_VFM(INTEL_KABYLAKE_L,		&spt_pmc_dev),
1611 	X86_MATCH_VFM(INTEL_KABYLAKE,		&spt_pmc_dev),
1612 	X86_MATCH_VFM(INTEL_CANNONLAKE_L,	&cnp_pmc_dev),
1613 	X86_MATCH_VFM(INTEL_ICELAKE_L,		&icl_pmc_dev),
1614 	X86_MATCH_VFM(INTEL_ICELAKE_NNPI,	&icl_pmc_dev),
1615 	X86_MATCH_VFM(INTEL_COMETLAKE,		&cnp_pmc_dev),
1616 	X86_MATCH_VFM(INTEL_COMETLAKE_L,	&cnp_pmc_dev),
1617 	X86_MATCH_VFM(INTEL_TIGERLAKE_L,	&tgl_l_pmc_dev),
1618 	X86_MATCH_VFM(INTEL_TIGERLAKE,		&tgl_pmc_dev),
1619 	X86_MATCH_VFM(INTEL_ATOM_TREMONT,	&tgl_l_pmc_dev),
1620 	X86_MATCH_VFM(INTEL_ATOM_TREMONT_L,	&icl_pmc_dev),
1621 	X86_MATCH_VFM(INTEL_ROCKETLAKE,		&tgl_pmc_dev),
1622 	X86_MATCH_VFM(INTEL_ALDERLAKE_L,	&tgl_l_pmc_dev),
1623 	X86_MATCH_VFM(INTEL_ATOM_GRACEMONT,	&tgl_l_pmc_dev),
1624 	X86_MATCH_VFM(INTEL_ALDERLAKE,		&adl_pmc_dev),
1625 	X86_MATCH_VFM(INTEL_RAPTORLAKE_P,	&tgl_l_pmc_dev),
1626 	X86_MATCH_VFM(INTEL_RAPTORLAKE,		&adl_pmc_dev),
1627 	X86_MATCH_VFM(INTEL_RAPTORLAKE_S,	&adl_pmc_dev),
1628 	X86_MATCH_VFM(INTEL_METEORLAKE_L,	&mtl_pmc_dev),
1629 	X86_MATCH_VFM(INTEL_ARROWLAKE,		&arl_pmc_dev),
1630 	X86_MATCH_VFM(INTEL_ARROWLAKE_H,	&arl_h_pmc_dev),
1631 	X86_MATCH_VFM(INTEL_ARROWLAKE_U,	&arl_h_pmc_dev),
1632 	X86_MATCH_VFM(INTEL_LUNARLAKE_M,	&lnl_pmc_dev),
1633 	X86_MATCH_VFM(INTEL_PANTHERLAKE_L,	&ptl_pmc_dev),
1634 	{}
1635 };
1636 
1637 MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids);
1638 
1639 /*
1640  * This quirk can be used on those platforms where
1641  * the platform BIOS forces the 24MHz crystal to shut down
1642  * before the PMC can assert SLP_S0#.
1643  */
1644 static bool xtal_ignore;
1645 static int quirk_xtal_ignore(const struct dmi_system_id *id)
1646 {
1647 	xtal_ignore = true;
1648 	return 0;
1649 }
1650 
1651 static void pmc_core_xtal_ignore(struct pmc *pmc)
1652 {
1653 	u32 value;
1654 
1655 	value = pmc_core_reg_read(pmc, pmc->map->pm_vric1_offset);
1656 	/* 24MHz Crystal Shutdown Qualification Disable */
1657 	value |= SPT_PMC_VRIC1_XTALSDQDIS;
1658 	/* Low Voltage Mode Enable */
1659 	value &= ~SPT_PMC_VRIC1_SLPS0LVEN;
1660 	pmc_core_reg_write(pmc, pmc->map->pm_vric1_offset, value);
1661 }
1662 
1663 static const struct dmi_system_id pmc_core_dmi_table[]  = {
1664 	{
1665 	.callback = quirk_xtal_ignore,
1666 	.ident = "HP Elite x2 1013 G3",
1667 	.matches = {
1668 		DMI_MATCH(DMI_SYS_VENDOR, "HP"),
1669 		DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite x2 1013 G3"),
1670 		},
1671 	},
1672 	{}
1673 };
1674 
1675 static void pmc_core_do_dmi_quirks(struct pmc *pmc)
1676 {
1677 	dmi_check_system(pmc_core_dmi_table);
1678 
1679 	if (xtal_ignore)
1680 		pmc_core_xtal_ignore(pmc);
1681 }
1682 
1683 static void pmc_core_clean_structure(struct platform_device *pdev)
1684 {
1685 	struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
1686 	unsigned int i;
1687 
1688 	for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
1689 		struct pmc *pmc = pmcdev->pmcs[i];
1690 
1691 		if (pmc && pmc->regbase)
1692 			iounmap(pmc->regbase);
1693 	}
1694 
1695 	if (pmcdev->punit_ep)
1696 		pmt_telem_unregister_endpoint(pmcdev->punit_ep);
1697 
1698 	platform_set_drvdata(pdev, NULL);
1699 }
1700 
1701 static int pmc_core_probe(struct platform_device *pdev)
1702 {
1703 	static bool device_initialized;
1704 	struct pmc_dev *pmcdev;
1705 	const struct x86_cpu_id *cpu_id;
1706 	struct pmc_dev_info *pmc_dev_info;
1707 	struct pmc *primary_pmc;
1708 	int ret;
1709 
1710 	if (device_initialized)
1711 		return -ENODEV;
1712 
1713 	pmcdev = devm_kzalloc(&pdev->dev, sizeof(*pmcdev), GFP_KERNEL);
1714 	if (!pmcdev)
1715 		return -ENOMEM;
1716 
1717 	pmcdev->crystal_freq = pmc_core_get_crystal_freq();
1718 
1719 	platform_set_drvdata(pdev, pmcdev);
1720 	pmcdev->pdev = pdev;
1721 
1722 	cpu_id = x86_match_cpu(intel_pmc_core_ids);
1723 	if (!cpu_id)
1724 		return -ENODEV;
1725 
1726 	pmc_dev_info = (struct pmc_dev_info *)cpu_id->driver_data;
1727 
1728 	/* Primary PMC */
1729 	primary_pmc = devm_kzalloc(&pdev->dev, sizeof(*primary_pmc), GFP_KERNEL);
1730 	if (!primary_pmc)
1731 		return -ENOMEM;
1732 	pmcdev->pmcs[PMC_IDX_MAIN] = primary_pmc;
1733 
1734 	/* The last element in msr_map is empty */
1735 	pmcdev->num_of_pkgc = ARRAY_SIZE(msr_map) - 1;
1736 	pmcdev->pkgc_res_cnt = devm_kcalloc(&pdev->dev,
1737 					    pmcdev->num_of_pkgc,
1738 					    sizeof(*pmcdev->pkgc_res_cnt),
1739 					    GFP_KERNEL);
1740 	if (!pmcdev->pkgc_res_cnt)
1741 		return -ENOMEM;
1742 
1743 	ret = devm_mutex_init(&pdev->dev, &pmcdev->lock);
1744 	if (ret)
1745 		return ret;
1746 
1747 	if (pmc_dev_info->init)
1748 		ret = pmc_dev_info->init(pmcdev, pmc_dev_info);
1749 	else
1750 		ret = generic_core_init(pmcdev, pmc_dev_info);
1751 
1752 	if (ret) {
1753 		platform_set_drvdata(pdev, NULL);
1754 		return ret;
1755 	}
1756 
1757 	pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(primary_pmc);
1758 	pmc_core_do_dmi_quirks(primary_pmc);
1759 
1760 	pmc_core_dbgfs_register(pmcdev);
1761 	pm_report_max_hw_sleep(FIELD_MAX(SLP_S0_RES_COUNTER_MASK) *
1762 			       pmc_core_adjust_slp_s0_step(primary_pmc, 1));
1763 
1764 	device_initialized = true;
1765 	dev_info(&pdev->dev, " initialized\n");
1766 
1767 	return 0;
1768 }
1769 
1770 static void pmc_core_remove(struct platform_device *pdev)
1771 {
1772 	struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
1773 	pmc_core_dbgfs_unregister(pmcdev);
1774 	pmc_core_clean_structure(pdev);
1775 }
1776 
1777 static bool warn_on_s0ix_failures;
1778 module_param(warn_on_s0ix_failures, bool, 0644);
1779 MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix failures");
1780 
1781 static bool ltr_ignore_all_suspend = true;
1782 module_param(ltr_ignore_all_suspend, bool, 0644);
1783 MODULE_PARM_DESC(ltr_ignore_all_suspend, "Ignore all LTRs during suspend");
1784 
1785 static __maybe_unused int pmc_core_suspend(struct device *dev)
1786 {
1787 	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
1788 	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
1789 	unsigned int i;
1790 
1791 	if (pmcdev->suspend)
1792 		pmcdev->suspend(pmcdev);
1793 
1794 	if (ltr_ignore_all_suspend)
1795 		pmc_core_ltr_ignore_all(pmcdev);
1796 
1797 	/* Check if the suspend will actually use S0ix */
1798 	if (pm_suspend_via_firmware())
1799 		return 0;
1800 
1801 	/* Save PKGC residency for checking later */
1802 	for (i = 0; i < pmcdev->num_of_pkgc; i++) {
1803 		if (rdmsrq_safe(msr_map[i].bit_mask, &pmcdev->pkgc_res_cnt[i]))
1804 			return -EIO;
1805 	}
1806 
1807 	/* Save S0ix residency for checking later */
1808 	if (pmc_core_dev_state_get(pmc, &pmcdev->s0ix_counter))
1809 		return -EIO;
1810 
1811 	return 0;
1812 }
1813 
1814 static inline bool pmc_core_is_deepest_pkgc_failed(struct pmc_dev *pmcdev)
1815 {
1816 	u32 deepest_pkgc_msr = msr_map[pmcdev->num_of_pkgc - 1].bit_mask;
1817 	u64 deepest_pkgc_residency;
1818 
1819 	if (rdmsrq_safe(deepest_pkgc_msr, &deepest_pkgc_residency))
1820 		return false;
1821 
1822 	if (deepest_pkgc_residency == pmcdev->pkgc_res_cnt[pmcdev->num_of_pkgc - 1])
1823 		return true;
1824 
1825 	return false;
1826 }
1827 
1828 static inline bool pmc_core_is_s0ix_failed(struct pmc_dev *pmcdev)
1829 {
1830 	u64 s0ix_counter;
1831 
1832 	if (pmc_core_dev_state_get(pmcdev->pmcs[PMC_IDX_MAIN], &s0ix_counter))
1833 		return false;
1834 
1835 	pm_report_hw_sleep_time((u32)(s0ix_counter - pmcdev->s0ix_counter));
1836 
1837 	if (s0ix_counter == pmcdev->s0ix_counter)
1838 		return true;
1839 
1840 	return false;
1841 }
1842 
1843 int pmc_core_resume_common(struct pmc_dev *pmcdev)
1844 {
1845 	struct device *dev = &pmcdev->pdev->dev;
1846 	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
1847 	const struct pmc_bit_map **maps = pmc->map->lpm_sts;
1848 	int offset = pmc->map->lpm_status_offset;
1849 	unsigned int i;
1850 
1851 	/* Check if the suspend used S0ix */
1852 	if (pm_suspend_via_firmware())
1853 		return 0;
1854 
1855 	if (!pmc_core_is_s0ix_failed(pmcdev))
1856 		return 0;
1857 
1858 	if (!warn_on_s0ix_failures)
1859 		return 0;
1860 
1861 	if (pmc_core_is_deepest_pkgc_failed(pmcdev)) {
1862 		/* S0ix failed because of deepest PKGC entry failure */
1863 		dev_info(dev, "CPU did not enter %s!!! (%s cnt=0x%llx)\n",
1864 			 msr_map[pmcdev->num_of_pkgc - 1].name,
1865 			 msr_map[pmcdev->num_of_pkgc - 1].name,
1866 			 pmcdev->pkgc_res_cnt[pmcdev->num_of_pkgc - 1]);
1867 
1868 		for (i = 0; i < pmcdev->num_of_pkgc; i++) {
1869 			u64 pc_cnt;
1870 
1871 			if (!rdmsrq_safe(msr_map[i].bit_mask, &pc_cnt)) {
1872 				dev_info(dev, "Prev %s cnt = 0x%llx, Current %s cnt = 0x%llx\n",
1873 					 msr_map[i].name, pmcdev->pkgc_res_cnt[i],
1874 					 msr_map[i].name, pc_cnt);
1875 			}
1876 		}
1877 		return 0;
1878 	}
1879 
1880 	/* The really interesting case - S0ix failed - let's ask the PMC why. */
1881 	dev_warn(dev, "CPU did not enter SLP_S0!!! (S0ix cnt=%llu)\n",
1882 		 pmcdev->s0ix_counter);
1883 
1884 	if (pmc->map->slps0_dbg_maps)
1885 		pmc_core_slps0_display(pmc, dev, NULL);
1886 
1887 	for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
1888 		struct pmc *pmc = pmcdev->pmcs[i];
1889 
1890 		if (!pmc)
1891 			continue;
1892 		if (pmc->map->lpm_sts)
1893 			pmc_core_lpm_display(pmc, dev, NULL, offset, i, "STATUS", maps);
1894 	}
1895 
1896 	return 0;
1897 }
1898 
1899 static __maybe_unused int pmc_core_resume(struct device *dev)
1900 {
1901 	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
1902 
1903 	if (ltr_ignore_all_suspend)
1904 		pmc_core_ltr_restore_all(pmcdev);
1905 
1906 	if (pmcdev->resume)
1907 		return pmcdev->resume(pmcdev);
1908 
1909 	return pmc_core_resume_common(pmcdev);
1910 }
1911 
1912 static const struct dev_pm_ops pmc_core_pm_ops = {
1913 	SET_LATE_SYSTEM_SLEEP_PM_OPS(pmc_core_suspend, pmc_core_resume)
1914 };
1915 
1916 static const struct acpi_device_id pmc_core_acpi_ids[] = {
1917 	{"INT33A1", 0}, /* _HID for Intel Power Engine, _CID PNP0D80*/
1918 	{ }
1919 };
1920 MODULE_DEVICE_TABLE(acpi, pmc_core_acpi_ids);
1921 
1922 static struct platform_driver pmc_core_driver = {
1923 	.driver = {
1924 		.name = "intel_pmc_core",
1925 		.acpi_match_table = ACPI_PTR(pmc_core_acpi_ids),
1926 		.pm = &pmc_core_pm_ops,
1927 		.dev_groups = pmc_dev_groups,
1928 	},
1929 	.probe = pmc_core_probe,
1930 	.remove = pmc_core_remove,
1931 };
1932 
1933 module_platform_driver(pmc_core_driver);
1934 
1935 MODULE_IMPORT_NS("INTEL_PMT_TELEMETRY");
1936 MODULE_LICENSE("GPL v2");
1937 MODULE_DESCRIPTION("Intel PMC Core Driver");
1938