xref: /linux/drivers/platform/x86/intel/pmc/core_ssram.c (revision 84bbfe6b6435658132df2880258d34babe46d3e0)
// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains functions to handle discovery of PMC metrics located
 * in the PMC SSRAM PCI device.
 *
 * Copyright (c) 2023, Intel Corporation.
 * All Rights Reserved.
 *
 */

#include <linux/cleanup.h>
#include <linux/intel_vsec.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "core.h"
#include "../pmt/telemetry.h"

#define SSRAM_HDR_SIZE		0x100
#define SSRAM_PWRM_OFFSET	0x14
#define SSRAM_DVSEC_OFFSET	0x1C
#define SSRAM_DVSEC_SIZE	0x10
#define SSRAM_PCH_OFFSET	0x60
#define SSRAM_IOE_OFFSET	0x68
#define SSRAM_DEVID_OFFSET	0x70

/* PCH query */
#define LPM_HEADER_OFFSET	1
#define LPM_REG_COUNT		28
#define LPM_MODE_OFFSET		1

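/*
 * Scope-based cleanup helper: pointers declared with
 * __free(pmc_core_iounmap) are automatically passed to iounmap() when they
 * go out of scope, unless ownership is released with no_free_ptr().
 */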
DEFINE_FREE(pmc_core_iounmap, void __iomem *, iounmap(_T));

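/*
 * Look up the PMT telemetry GUID associated with a register map by scanning
 * the platform's pmc_info list. Returns 0 if the map is not found.
 */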
static u32 pmc_core_find_guid(struct pmc_info *list, const struct pmc_reg_map *map)
{
	for (; list->map; ++list)
		if (list->map == map)
			return list->guid;

	return 0;
}

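/*
 * Read the Low Power Mode (LPM) substate requirement registers for one PMC
 * from its PMT telemetry region and cache them in pmc->lpm_req_regs.
 */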
static int pmc_core_get_lpm_req(struct pmc_dev *pmcdev, struct pmc *pmc)
{
	struct telem_endpoint *ep;
	const u8 *lpm_indices;
	int num_maps, mode_offset = 0;
	int ret = 0, mode;
	int lpm_size;
	u32 guid;

	lpm_indices = pmc->map->lpm_reg_index;
	num_maps = pmc->map->lpm_num_maps;
	lpm_size = LPM_MAX_NUM_MODES * num_maps;

	guid = pmc_core_find_guid(pmcdev->regmap_list, pmc->map);
	if (!guid)
		return -ENXIO;

	ep = pmt_telem_find_and_register_endpoint(pmcdev->ssram_pcidev, guid, 0);
	if (IS_ERR(ep)) {
		dev_dbg(&pmcdev->pdev->dev, "couldn't get telem endpoint %ld\n",
			PTR_ERR(ep));
		return -EPROBE_DEFER;
	}

	pmc->lpm_req_regs = devm_kzalloc(&pmcdev->pdev->dev,
					 lpm_size * sizeof(u32),
					 GFP_KERNEL);
	if (!pmc->lpm_req_regs) {
		ret = -ENOMEM;
		goto unregister_ep;
	}

	/*
	 * PMC Low Power Mode (LPM) table
	 *
	 * In telemetry space, the LPM table contains a 4 byte header followed
	 * by 8 consecutive mode blocks (one for each LPM mode). Each block
	 * has a 4 byte header followed by a set of registers that describe the
	 * IP state requirements for the given mode. The IP mapping is platform
	 * specific but the same for each block, making for easy analysis.
	 * Platforms only use a subset of the space to track the requirements
	 * for their IPs. Callers provide the requirement registers they use as
	 * a list of indices. Each requirement register is associated with an
	 * IP map that's maintained by the caller.
	 *
	 * Header
	 * +----+----------------------------+----------------------------+
	 * |  0 |      REVISION              |      ENABLED MODES         |
	 * +----+--------------+-------------+-------------+--------------+
	 *
	 * Low Power Mode 0 Block
	 * +----+--------------+-------------+-------------+--------------+
	 * |  1 |     SUB ID   |     SIZE    |   MAJOR     |   MINOR      |
	 * +----+--------------+-------------+-------------+--------------+
	 * |  2 |           LPM0 Requirements 0                           |
	 * +----+---------------------------------------------------------+
	 * |    |                  ...                                    |
	 * +----+---------------------------------------------------------+
	 * | 29 |           LPM0 Requirements 27                          |
	 * +----+---------------------------------------------------------+
	 *
	 * ...
	 *
	 * Low Power Mode 7 Block
	 * +----+--------------+-------------+-------------+--------------+
	 * |    |     SUB ID   |     SIZE    |   MAJOR     |   MINOR      |
	 * +----+--------------+-------------+-------------+--------------+
	 * | 60 |           LPM7 Requirements 0                           |
	 * +----+---------------------------------------------------------+
	 * |    |                  ...                                    |
	 * +----+---------------------------------------------------------+
	 * | 87 |           LPM7 Requirements 27                          |
	 * +----+---------------------------------------------------------+
	 *
	 */
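	/*
	 * Example of the index arithmetic below: for the first mode
	 * iterated, mode_offset is 2 (one entry for the table header plus
	 * one for the mode block header), so requirement register
	 * lpm_indices[m] is read from telemetry sample 2 + lpm_indices[m].
	 * Each subsequent mode advances mode_offset by
	 * LPM_REG_COUNT + LPM_MODE_OFFSET = 29, skipping the 28 requirement
	 * entries and the next block header.
	 */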
	mode_offset = LPM_HEADER_OFFSET + LPM_MODE_OFFSET;
	pmc_for_each_mode(mode, pmcdev) {
		u32 *req_offset = pmc->lpm_req_regs + (mode * num_maps);
		int m;

		for (m = 0; m < num_maps; m++) {
			u8 sample_id = lpm_indices[m] + mode_offset;

			ret = pmt_telem_read32(ep, sample_id, req_offset, 1);
			if (ret) {
				dev_err(&pmcdev->pdev->dev,
					"couldn't read Low Power Mode requirements: %d\n", ret);
				devm_kfree(&pmcdev->pdev->dev, pmc->lpm_req_regs);
				pmc->lpm_req_regs = NULL;
				goto unregister_ep;
			}
			++req_offset;
		}
		mode_offset += LPM_REG_COUNT + LPM_MODE_OFFSET;
	}

unregister_ep:
	pmt_telem_unregister_endpoint(ep);

	return ret;
}

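/*
 * Collect the LPM requirement registers for every PMC discovered through
 * SSRAM. Requires that the SSRAM PCI device has been found and enabled.
 */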
int pmc_core_ssram_get_lpm_reqs(struct pmc_dev *pmcdev)
{
	int ret, i;

	if (!pmcdev->ssram_pcidev)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
		if (!pmcdev->pmcs[i])
			continue;

		ret = pmc_core_get_lpm_req(pmcdev, pmcdev->pmcs[i]);
		if (ret)
			return ret;
	}

	return 0;
}

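/*
 * Read the Intel DVSEC structure advertised in the SSRAM header and register
 * it with the Intel VSEC driver so that the PMC telemetry region is
 * enumerated as a PMT device.
 */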
static void
pmc_add_pmt(struct pmc_dev *pmcdev, u64 ssram_base, void __iomem *ssram)
{
	struct pci_dev *pcidev = pmcdev->ssram_pcidev;
	struct intel_vsec_platform_info info = {};
	struct intel_vsec_header *headers[2] = {};
	struct intel_vsec_header header;
	void __iomem *dvsec;
	u32 dvsec_offset;
	u32 table, hdr;

	ssram = ioremap(ssram_base, SSRAM_HDR_SIZE);
	if (!ssram)
		return;

	dvsec_offset = readl(ssram + SSRAM_DVSEC_OFFSET);
	iounmap(ssram);

	dvsec = ioremap(ssram_base + dvsec_offset, SSRAM_DVSEC_SIZE);
	if (!dvsec)
		return;

	hdr = readl(dvsec + PCI_DVSEC_HEADER1);
	header.id = readw(dvsec + PCI_DVSEC_HEADER2);
	header.rev = PCI_DVSEC_HEADER1_REV(hdr);
	header.length = PCI_DVSEC_HEADER1_LEN(hdr);
	header.num_entries = readb(dvsec + INTEL_DVSEC_ENTRIES);
	header.entry_size = readb(dvsec + INTEL_DVSEC_SIZE);

	table = readl(dvsec + INTEL_DVSEC_TABLE);
	header.tbir = INTEL_DVSEC_TABLE_BAR(table);
	header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
	iounmap(dvsec);

	headers[0] = &header;
	info.caps = VSEC_CAP_TELEMETRY;
	info.headers = headers;
	info.base_addr = ssram_base;
	info.parent = &pmcdev->pdev->dev;

	intel_vsec_register(pcidev, &info);
}

static const struct pmc_reg_map *pmc_core_find_regmap(struct pmc_info *list, u16 devid)
{
	for (; list->map; ++list)
		if (devid == list->devid)
			return list->map;

	return NULL;
}

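/*
 * Base addresses in the SSRAM header are 64-bit values read as two 32-bit
 * halves; the low three bits are not part of the address and are masked off.
 */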
static inline u64 get_base(void __iomem *addr, u32 offset)
{
	return lo_hi_readq(addr + offset) & GENMASK_ULL(63, 3);
}

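/*
 * Bind a register map and PWRM base address to the PMC at pmc_index. The
 * struct pmc for the primary PMC is preallocated in core.c; secondary PMCs
 * are allocated here on first use.
 */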
static int
pmc_core_pmc_add(struct pmc_dev *pmcdev, u64 pwrm_base,
		 const struct pmc_reg_map *reg_map, int pmc_index)
{
	struct pmc *pmc = pmcdev->pmcs[pmc_index];

	if (!pwrm_base)
		return -ENODEV;

	/* Memory for the primary PMC has been allocated in core.c */
	if (!pmc) {
		pmc = devm_kzalloc(&pmcdev->pdev->dev, sizeof(*pmc), GFP_KERNEL);
		if (!pmc)
			return -ENOMEM;
	}

	pmc->map = reg_map;
	pmc->base_addr = pwrm_base;
	pmc->regbase = ioremap(pmc->base_addr, pmc->map->regmap_length);

	if (!pmc->regbase) {
		devm_kfree(&pmcdev->pdev->dev, pmc);
		return -ENOMEM;
	}

	pmcdev->pmcs[pmc_index] = pmc;

	return 0;
}

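/*
 * Discover one PMC through SSRAM. The primary PMC header is mapped directly
 * from BAR 0 of the SSRAM PCI device; secondary PMC headers are located via
 * base addresses stored at fixed offsets within the primary header. The
 * header supplies the PWRM base address and the device ID used to select the
 * register map.
 */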
static int
pmc_core_ssram_get_pmc(struct pmc_dev *pmcdev, int pmc_idx, u32 offset)
{
	struct pci_dev *ssram_pcidev = pmcdev->ssram_pcidev;
	void __iomem __free(pmc_core_iounmap) *tmp_ssram = NULL;
	void __iomem __free(pmc_core_iounmap) *ssram = NULL;
	const struct pmc_reg_map *map;
	u64 ssram_base, pwrm_base;
	u16 devid;

	if (!pmcdev->regmap_list)
		return -ENOENT;

	ssram_base = ssram_pcidev->resource[0].start;
	tmp_ssram = ioremap(ssram_base, SSRAM_HDR_SIZE);
	if (!tmp_ssram)
		return -ENOMEM;

	if (pmc_idx != PMC_IDX_MAIN) {
		/*
		 * The secondary PMC BARs (which are behind hidden PCI devices)
		 * are read from fixed offsets in MMIO of the primary PMC BAR.
		 */
		ssram_base = get_base(tmp_ssram, offset);
		ssram = ioremap(ssram_base, SSRAM_HDR_SIZE);
		if (!ssram)
			return -ENOMEM;

	} else {
		ssram = no_free_ptr(tmp_ssram);
	}

	pwrm_base = get_base(ssram, SSRAM_PWRM_OFFSET);
	devid = readw(ssram + SSRAM_DEVID_OFFSET);

	/* Find and register the PMC telemetry entries */
	pmc_add_pmt(pmcdev, ssram_base, ssram);

	map = pmc_core_find_regmap(pmcdev->regmap_list, devid);
	if (!map)
		return -ENODEV;

	return pmc_core_pmc_add(pmcdev, pwrm_base, map, pmc_idx);
}

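/*
 * Locate and enable the PMC SSRAM PCI device at device 0x14 and the given
 * function number, then discover its PMCs. The primary PMC must be present;
 * the IOE and PCH PMCs are optional, so failures to add them are ignored.
 */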
int pmc_core_ssram_init(struct pmc_dev *pmcdev, int func)
{
	struct pci_dev *pcidev;
	int ret;

	pcidev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(20, func));
	if (!pcidev)
		return -ENODEV;

	ret = pcim_enable_device(pcidev);
	if (ret)
		goto release_dev;

	pmcdev->ssram_pcidev = pcidev;

	ret = pmc_core_ssram_get_pmc(pmcdev, PMC_IDX_MAIN, 0);
	if (ret)
		goto disable_dev;

	pmc_core_ssram_get_pmc(pmcdev, PMC_IDX_IOE, SSRAM_IOE_OFFSET);
	pmc_core_ssram_get_pmc(pmcdev, PMC_IDX_PCH, SSRAM_PCH_OFFSET);

	return 0;

disable_dev:
	pmcdev->ssram_pcidev = NULL;
	pci_disable_device(pcidev);
release_dev:
	pci_dev_put(pcidev);

	return ret;
}
MODULE_IMPORT_NS(INTEL_VSEC);
MODULE_IMPORT_NS(INTEL_PMT_TELEMETRY);