xref: /linux/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c (revision 1193e205dbb6feca917dc8e1862ffcdf2194234b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * isst_tpmi.c: SST TPMI interface core
4  *
5  * Copyright (c) 2023, Intel Corporation.
6  * All Rights Reserved.
7  *
8  * This information will be useful to understand flows:
9  * In the current generation of platforms, TPMI is supported via OOB
10  * PCI device. This PCI device has one instance per CPU package.
11  * There is a unique TPMI ID for SST. Each TPMI ID also has multiple
12  * entries, representing per power domain information.
13  *
14  * There is one dev file for complete SST information and control same as the
15  * prior generation of hardware. User spaces don't need to know how the
16  * information is presented by the hardware. The TPMI core module implements
17  * the hardware mapping.
18  */
19 
20 #define dev_fmt(fmt) "tpmi_sst: " fmt
21 
22 #include <linux/auxiliary_bus.h>
23 #include <linux/delay.h>
24 #include <linux/intel_tpmi.h>
25 #include <linux/fs.h>
26 #include <linux/io.h>
27 #include <linux/kernel.h>
28 #include <linux/minmax.h>
29 #include <linux/module.h>
30 #include <asm/msr.h>
31 #include <uapi/linux/isst_if.h>
32 
33 #include "isst_tpmi_core.h"
34 #include "isst_if_common.h"
35 
36 /* Supported SST hardware version by this driver */
37 #define ISST_MAJOR_VERSION	0
38 #define ISST_MINOR_VERSION	2
39 
40 /*
41  * Used to indicate if value read from MMIO needs to get multiplied
42  * to get to a standard unit or not.
43  */
44 #define SST_MUL_FACTOR_NONE    1
45 
46 /* Define 100 as a scaling factor frequency ratio to frequency conversion */
47 #define SST_MUL_FACTOR_FREQ    100
48 
49 /* All SST regs are 64 bit size */
50 #define SST_REG_SIZE   8
51 
/**
 * struct sst_header -	SST main header
 * @interface_version:	Version number for this interface
 * @cap_mask:		Bitmask of the supported sub features. 1=the sub feature is enabled.
 *			0=disabled.
 *			Bit[8]= SST_CP enable (1), disable (0)
 *			bit[9]= SST_PP enable (1), disable (0)
 *			other bits are reserved for future use
 *			(bit numbers refer to the 64 bit header register; within
 *			this u8 field they are bit 0 = SST-CP and bit 1 = SST-PP,
 *			as tested via cap_mask & BIT(0)/BIT(1) in the handlers)
 * @cp_offset:		Qword (8 bytes) offset to the SST_CP register bank
 * @pp_offset:		Qword (8 bytes) offset to the SST_PP register bank
 * @reserved:		Reserved for future use
 *
 * This register allows SW to discover SST capability and the offsets to SST-CP
 * and SST-PP register banks.
 *
 * Note: sst_main() converts @cp_offset and @pp_offset in place from qword
 * units to byte offsets, so after probe they are byte offsets from sst_base.
 */
struct sst_header {
	u8 interface_version;
	u8 cap_mask;
	u8 cp_offset;
	u8 pp_offset;
	u32 reserved;
} __packed;
74 
/**
 * struct cp_header -	SST-CP (core-power) header
 * @feature_id:		0=SST-CP, 1=SST-PP, 2=SST-BF, 3=SST-TF
 * @feature_rev:	Interface Version number for this SST feature
 * @ratio_unit:		Frequency ratio unit. 00: 100MHz. All others are reserved
 * @reserved:		Reserved for future use
 *
 * This structure is used store SST-CP header. This is packed to the same
 * format as defined in the specifications.
 *
 * Loaded with a single readq() in sst_main(), so the bit-field layout must
 * exactly match the hardware register.
 */
struct cp_header {
	u64 feature_id :4;
	u64 feature_rev :8;
	u64 ratio_unit :2;
	u64 reserved :50;
} __packed;
91 
/**
 * struct pp_header -	SST-PP (Perf profile) header
 * @feature_id:		0=SST-CP, 1=SST-PP, 2=SST-BF, 3=SST-TF
 * @feature_rev:	Interface Version number for this SST feature
 * @level_en_mask:	SST-PP level enable/disable fuse mask. sst_main() scans
 *			this mask to determine the highest enabled PP level.
 * @allowed_level_mask:	Allowed level mask used for dynamic config level switching
 * @reserved0:		Reserved for future use
 * @ratio_unit:		Frequency ratio unit. 00: 100MHz. All others are reserved
 * @block_size:		Size of PP block in Qword unit (8 bytes)
 * @dynamic_switch:	If set (1), dynamic switching of SST PP is supported
 * @memory_ratio_unit:	Memory Controller frequency ratio unit. 00: 100MHz, others reserved
 * @reserved1:		Reserved for future use
 *
 * This structure is used store SST-PP header. This is packed to the same
 * format as defined in the specifications.
 *
 * Loaded with a single readq() in sst_main(), so the bit-field layout must
 * exactly match the hardware register.
 */
struct pp_header {
	u64 feature_id :4;
	u64 feature_rev :8;
	u64 level_en_mask :8;
	u64 allowed_level_mask :8;
	u64 reserved0 :4;
	u64 ratio_unit :2;
	u64 block_size :8;
	u64 dynamic_switch :1;
	u64 memory_ratio_unit :2;
	u64 reserved1 :19;
} __packed;
120 
/**
 * struct feature_offset -	Offsets to SST-PP features
 * @pp_offset:		Qword offset within PP level for the SST_PP register bank
 * @bf_offset:		Qword offset within PP level for the SST_BF register bank
 * @tf_offset:		Qword offset within PP level for the SST_TF register bank
 * @reserved:		Reserved for future use
 *
 * This structure is used store offsets for SST features in the register bank.
 * This is packed to the same format as defined in the specifications.
 *
 * Offsets are in qword units; readers multiply by 8 to get byte offsets
 * (see _read_bf_level_info()/_read_tf_level_info()).
 */
struct feature_offset {
	u64 pp_offset :8;
	u64 bf_offset :8;
	u64 tf_offset :8;
	u64 reserved :40;
} __packed;
137 
/**
 * struct levels_offset -	Offsets to each SST PP level
 * @sst_pp_level0_offset:	Qword offset to the register block of PP level 0
 * @sst_pp_level1_offset:	Qword offset to the register block of PP level 1
 * @sst_pp_level2_offset:	Qword offset to the register block of PP level 2
 * @sst_pp_level3_offset:	Qword offset to the register block of PP level 3
 * @sst_pp_level4_offset:	Qword offset to the register block of PP level 4
 * @reserved:			Reserved for future use
 *
 * This structure is used store offsets of SST PP levels in the register bank.
 * This is packed to the same format as defined in the specifications.
 *
 * Describes the register at pp_offset + SST_PP_OFFSET_1, which
 * sst_add_perf_profiles() parses field by field with shifts/masks.
 */
struct levels_offset {
	u64 sst_pp_level0_offset :8;
	u64 sst_pp_level1_offset :8;
	u64 sst_pp_level2_offset :8;
	u64 sst_pp_level3_offset :8;
	u64 sst_pp_level4_offset :8;
	u64 reserved :24;
} __packed;
158 
/**
 * struct pp_control_offset -	Offsets for SST PP controls
 * @perf_level:		A SST-PP level that SW intends to switch to
 * @perf_level_lock:	SST-PP level select lock. 0 - unlocked. 1 - locked till next reset
 * @resvd0:		Reserved for future use
 * @current_state:	Bit mask to control the enable(1)/disable(0) state of each feature
 *			of the current PP level, bit 0 = BF, bit 1 = TF, bit 2-7 = reserved
 * @reserved:		Reserved for future use
 *
 * This structure is used store offsets of SST PP controls in the register bank.
 * This is packed to the same format as defined in the specifications.
 *
 * Describes the register at pp_offset + SST_PP_CONTROL_OFFSET.
 */
struct pp_control_offset {
	u64 perf_level :3;
	u64 perf_level_lock :1;
	u64 resvd0 :4;
	u64 current_state :8;
	u64 reserved :48;
} __packed;
178 
/**
 * struct pp_status_offset -	Offsets for SST PP status fields
 * @sst_pp_level:	Returns the current SST-PP level
 * @sst_pp_lock:	Returns the lock bit setting of perf_level_lock in pp_control_offset
 * @error_type:		Returns last error of SST-PP level change request. 0: no error,
 *			1: level change not allowed, others: reserved
 * @feature_state:	Bit mask to indicate the enable(1)/disable(0) state of each feature of the
 *			current PP level. bit 0 = BF, bit 1 = TF, bit 2-7 reserved
 * @reserved0:		Reserved for future use
 * @feature_error_type: Returns last error of the specific feature. Three error_type bits per
 *			feature. i.e. ERROR_TYPE[2:0] for BF, ERROR_TYPE[5:3] for TF, etc.
 *			0x0: no error, 0x1: The specific feature is not supported by the hardware.
 *			0x2-0x6: Reserved. 0x7: feature state change is not allowed.
 * @reserved1:		Reserved for future use
 *
 * This structure is used store offsets of SST PP status in the register bank.
 * This is packed to the same format as defined in the specifications.
 *
 * Describes the register at pp_offset + SST_PP_STATUS_OFFSET, read field
 * by field via _read_pp_info() in isst_if_get_perf_level().
 */
struct pp_status_offset {
	u64 sst_pp_level :3;
	u64 sst_pp_lock :1;
	u64 error_type :4;
	u64 feature_state :8;
	u64 reserved0 :16;
	u64 feature_error_type : 24;
	u64 reserved1 :8;
} __packed;
206 
/**
 * struct perf_level -	Used to store perf level and mmio offset
 * @mmio_offset:	mmio offset for a perf level, relative to sst_base
 *			(computed in sst_add_perf_profiles())
 * @level:		perf level for this offset
 *			(NOTE(review): not assigned in the code visible here;
 *			the devm_kcalloc() zero-fill leaves it 0 — verify it is
 *			set or used elsewhere)
 *
 * This structure is used store final mmio offset of each perf level from the
 * SST base mmio offset.
 */
struct perf_level {
	int mmio_offset;
	int level;
};
219 
/**
 * struct tpmi_per_power_domain_info -	Store per power_domain SST info
 * @package_id:		Package id for this power_domain
 * @power_domain_id:	Power domain id, Each entry from the SST-TPMI instance is a power_domain.
 * @max_level:		Max possible PP level possible for this power_domain
 * @ratio_unit:		Ratio unit for converting to MHz
 * @avx_levels:		Number of AVX levels
 * @pp_block_size:	Block size from PP header
 * @sst_header:		Store SST header for this power_domain
 * @cp_header:		Store SST-CP header for this power_domain
 * @pp_header:		Store SST-PP header for this power_domain
 * @perf_levels:	Pointer to each perf level to map level to mmio offset
 * @feature_offsets:	Store feature offsets for each PP-level
 * @control_offset:	Store the control offset for each PP-level
 * @status_offset:	Store the status offset for each PP-level
 * @sst_base:		Mapped SST base IO memory. A NULL base marks the domain
 *			as unusable (see get_instance()).
 * @auxdev:		Auxiliary device instance enumerated this instance
 * @saved_sst_cp_control: Save SST-CP control configuration to store restore for suspend/resume
 * @saved_clos_configs:	Save SST-CP CLOS configuration to store restore for suspend/resume
 * @saved_clos_assocs:	Save SST-CP CLOS association to store restore for suspend/resume
 * @saved_pp_control:	Save SST-PP control information to store restore for suspend/resume
 * @write_blocked:	Write operation is blocked, so can't change SST state.
 *			Checked before register writes in the CLOS IOCTL paths.
 *
 * This structure is used store complete SST information for a power_domain. This information
 * is used to read/write request for any SST IOCTL. Each physical CPU package can have multiple
 * power_domains. Each power domain describes its own SST information and has its own controls.
 */
struct tpmi_per_power_domain_info {
	int package_id;
	int power_domain_id;
	int max_level;
	int ratio_unit;
	int avx_levels;
	int pp_block_size;
	struct sst_header sst_header;
	struct cp_header cp_header;
	struct pp_header pp_header;
	struct perf_level *perf_levels;
	struct feature_offset feature_offsets;
	struct pp_control_offset control_offset;
	struct pp_status_offset status_offset;
	void __iomem *sst_base;
	struct auxiliary_device *auxdev;
	u64 saved_sst_cp_control;
	u64 saved_clos_configs[4];
	u64 saved_clos_assocs[4];
	u64 saved_pp_control;
	bool write_blocked;
};
269 
270 /* Supported maximum partitions */
271 #define SST_MAX_PARTITIONS	2
272 
/**
 * struct tpmi_sst_struct -	Store sst info for a package
 * @package_id:			Package id for this aux device instance
 * @number_of_power_domains:	Number of power_domains pointed by power_domain_info pointer
 * @power_domain_info:		Pointer to power domains information
 * @cdie_mask:			Mask of compute dies present in a partition from hardware.
 *				This mask is not present in the version 1 information header.
 * @io_dies:			Number of IO dies in a partition. This will be 0 for TPMI
 *				version 1 information header.
 * @partition_mask:		Mask of all partitions.
 * @partition_mask_current:	Current partition mask as some may have been unbound.
 *
 * This structure is used store full SST information for a package.
 * Each package has one or multiple OOB PCI devices. Each package can contain multiple
 * power domains.
 *
 * The per-partition arrays are indexed 0..hweight8(@partition_mask)-1, since
 * the partition mask is a contiguous run of 1s starting at bit 0 (see
 * isst_instance_count()).
 */
struct tpmi_sst_struct {
	int package_id;
	struct tpmi_per_power_domain_info *power_domain_info[SST_MAX_PARTITIONS];
	u16 cdie_mask[SST_MAX_PARTITIONS];
	u8 number_of_power_domains[SST_MAX_PARTITIONS];
	u8 io_dies[SST_MAX_PARTITIONS];
	u8 partition_mask;
	u8 partition_mask_current;
};
298 
/**
 * struct tpmi_sst_common_struct -	Store all SST instances
 * @max_index:		Maximum instances currently present (highest valid
 *			package index; get_instance() rejects pkg_id > max_index)
 * @sst_inst:		Pointer to per package instance, indexed by package id.
 *			Entries may be NULL for unregistered packages.
 *
 * Stores every SST Package instance.
 */
struct tpmi_sst_common_struct {
	int max_index;
	struct tpmi_sst_struct **sst_inst;
};
310 
311 /*
312  * Each IOCTL request is processed under this lock. Also used to protect
313  * registration functions and common data structures.
314  */
315 static DEFINE_MUTEX(isst_tpmi_dev_lock);
316 
317 /* Usage count to track, number of TPMI SST instances registered to this core. */
318 static int isst_core_usage_count;
319 
320 /* Stores complete SST information for every package and power_domain */
321 static struct tpmi_sst_common_struct isst_common;
322 
323 #define SST_MAX_AVX_LEVELS	3
324 
325 #define SST_PP_OFFSET_0		8
326 #define SST_PP_OFFSET_1		16
327 #define SST_PP_OFFSET_SIZE	8
328 
sst_add_perf_profiles(struct auxiliary_device * auxdev,struct tpmi_per_power_domain_info * pd_info,int levels)329 static int sst_add_perf_profiles(struct auxiliary_device *auxdev,
330 				 struct tpmi_per_power_domain_info *pd_info,
331 				 int levels)
332 {
333 	struct device *dev = &auxdev->dev;
334 	u64 perf_level_offsets;
335 	int i;
336 
337 	pd_info->perf_levels = devm_kcalloc(dev, levels, sizeof(struct perf_level), GFP_KERNEL);
338 	if (!pd_info->perf_levels)
339 		return 0;
340 
341 	pd_info->ratio_unit = pd_info->pp_header.ratio_unit;
342 	pd_info->avx_levels = SST_MAX_AVX_LEVELS;
343 	pd_info->pp_block_size = pd_info->pp_header.block_size;
344 
345 	/* Read PP Offset 0: Get feature offset with PP level */
346 	*((u64 *)&pd_info->feature_offsets) = readq(pd_info->sst_base +
347 						    pd_info->sst_header.pp_offset +
348 						    SST_PP_OFFSET_0);
349 
350 	perf_level_offsets = readq(pd_info->sst_base + pd_info->sst_header.pp_offset +
351 				   SST_PP_OFFSET_1);
352 
353 	for (i = 0; i < levels; ++i) {
354 		u64 offset;
355 
356 		offset = perf_level_offsets & (0xffULL << (i * SST_PP_OFFSET_SIZE));
357 		offset >>= (i * 8);
358 		offset &= 0xff;
359 		offset *= 8; /* Convert to byte from QWORD offset */
360 		pd_info->perf_levels[i].mmio_offset = pd_info->sst_header.pp_offset + offset;
361 	}
362 
363 	return 0;
364 }
365 
sst_main(struct auxiliary_device * auxdev,struct tpmi_per_power_domain_info * pd_info)366 static int sst_main(struct auxiliary_device *auxdev, struct tpmi_per_power_domain_info *pd_info)
367 {
368 	struct device *dev = &auxdev->dev;
369 	int i, mask, levels;
370 
371 	*((u64 *)&pd_info->sst_header) = readq(pd_info->sst_base);
372 	pd_info->sst_header.cp_offset *= 8;
373 	pd_info->sst_header.pp_offset *= 8;
374 
375 	if (pd_info->sst_header.interface_version == TPMI_VERSION_INVALID)
376 		return -ENODEV;
377 
378 	if (TPMI_MAJOR_VERSION(pd_info->sst_header.interface_version) != ISST_MAJOR_VERSION) {
379 		dev_err(dev, "SST: Unsupported major version:%lx\n",
380 			TPMI_MAJOR_VERSION(pd_info->sst_header.interface_version));
381 		return -ENODEV;
382 	}
383 
384 	if (TPMI_MINOR_VERSION(pd_info->sst_header.interface_version) > ISST_MINOR_VERSION)
385 		dev_info(dev, "SST: Ignore: Unsupported minor version:%lx\n",
386 			 TPMI_MINOR_VERSION(pd_info->sst_header.interface_version));
387 
388 	/* Read SST CP Header */
389 	*((u64 *)&pd_info->cp_header) = readq(pd_info->sst_base + pd_info->sst_header.cp_offset);
390 
391 	/* Read PP header */
392 	*((u64 *)&pd_info->pp_header) = readq(pd_info->sst_base + pd_info->sst_header.pp_offset);
393 
394 	mask = 0x01;
395 	levels = 0;
396 	for (i = 0; i < 8; ++i) {
397 		if (pd_info->pp_header.level_en_mask & mask)
398 			levels = i;
399 		mask <<= 1;
400 	}
401 	pd_info->max_level = levels;
402 	sst_add_perf_profiles(auxdev, pd_info, levels + 1);
403 
404 	return 0;
405 }
406 
isst_instance_count(struct tpmi_sst_struct * sst_inst)407 static u8 isst_instance_count(struct tpmi_sst_struct *sst_inst)
408 {
409 	u8 i, max_part, count = 0;
410 
411 	/* Partition mask starts from bit 0 and contains 1s only */
412 	max_part = hweight8(sst_inst->partition_mask);
413 	for (i = 0; i < max_part; i++)
414 		count += sst_inst->number_of_power_domains[i];
415 
416 	return count;
417 }
418 
419 /**
420  * map_cdies() - Map user domain ID to compute domain ID
421  * @sst_inst: TPMI Instance
422  * @id: User domain ID
423  * @partition: Resolved partition
424  *
425  * Helper function to map_partition_power_domain_id() to resolve compute
426  * domain ID and partition. Use hardware provided cdie_mask for a partition
427  * as is to resolve a compute domain ID.
428  *
429  * Return: %-EINVAL on error, otherwise mapped domain ID >= 0.
430  */
static int map_cdies(struct tpmi_sst_struct *sst_inst, u8 id, u8 *partition)
{
	u8 part, nparts;

	nparts = hweight8(sst_inst->partition_mask);
	for (part = 0; part < nparts; part++) {
		u16 cmask = sst_inst->cdie_mask[part];

		if (!(cmask & BIT(id)))
			continue;

		*partition = part;
		/* ffs() is 1-based: index of @id within this partition's mask */
		return id - ffs(cmask) + 1;
	}

	/* @id is not a compute die in any partition */
	return -EINVAL;
}
446 
447 /**
448  * map_partition_power_domain_id() - Map user domain ID to partition domain ID
449  * @sst_inst: TPMI Instance
450  * @id: User domain ID
451  * @partition: Resolved partition
452  *
453  * In a partitioned system a CPU package has two separate MMIO ranges (Under
454  * two PCI devices). But the CPU package compute die/power domain IDs are
455  * unique in a package. User space can get compute die/power domain ID from
456  * CPUID and MSR 0x54 for a CPU. So, those IDs need to be preserved even if
457  * they are present in two different partitions with its own order.
458  *
459  * For example for command ISST_IF_COUNT_TPMI_INSTANCES, the valid_mask
460  * is 111111b for a 4 compute and 2 IO dies system. This is presented as
461  * provided by the hardware in a non-partitioned system with the following
462  * order:
463  *	I1-I0-C3-C2-C1-C0
464  * Here: "C": for compute and "I" for IO die.
465  * Compute dies are always present first in TPMI instances, as they have
466  * to map to the real power domain/die ID of a system. In a non-partitioned
467  * system there is no way to identify compute and IO die boundaries from
468  * this driver without reading each CPU's mapping.
469  *
470  * The same order needs to be preserved, even if those compute dies are
471  * distributed among multiple partitions. For example:
472  * Partition 1 can contain: I1-C1-C0
473  * Partition 2 can contain: I2-C3-C2
474  *
475  * This will require a conversion of user space IDs to the actual index into
476  * array of stored power domains for each partition. For the above example
477  * this function will return partition and index as follows:
478  *
479  * =============	=========	=====	========
480  * User space ID	Partition	Index	Die type
481  * =============	=========	=====	========
482  * 0			0		0	Compute
483  * 1			0		1	Compute
484  * 2			1		0	Compute
485  * 3			1		1	Compute
486  * 4			0		2	IO
487  * 5			1		2	IO
488  * =============	=========	=====	========
489  *
490  * Return: %-EINVAL on error, otherwise mapped domain ID >= 0.
491  */
static int map_partition_power_domain_id(struct tpmi_sst_struct *sst_inst, u8 id, u8 *partition)
{
	u8 i, io_start_id, max_part;

	*partition = 0;

	/* If any PCI device for partition is unbound, treat this as failure */
	if (sst_inst->partition_mask != sst_inst->partition_mask_current)
		return -EINVAL;

	max_part = hweight8(sst_inst->partition_mask);

	/*
	 * IO Index begin here. Compute die IDs occupy the low range across
	 * all partitions; the first IO die ID is one past the highest compute
	 * die bit of the last partition's cdie_mask (fls() is 1-based).
	 */
	io_start_id = fls(sst_inst->cdie_mask[max_part - 1]);

	if (id < io_start_id)
		return map_cdies(sst_inst, id, partition);

	/*
	 * IO die: walk partitions in order, advancing io_start_id by each
	 * partition's IO die count until @id falls inside one of them.
	 */
	for (i = 0; i < max_part; i++) {
		u8 io_id;

		io_id = id - io_start_id;
		if (io_id < sst_inst->io_dies[i]) {
			u8 cdie_range;

			/* IO dies are stored after this partition's compute dies */
			cdie_range = fls(sst_inst->cdie_mask[i]) - ffs(sst_inst->cdie_mask[i]) + 1;
			*partition = i;
			return cdie_range + io_id;
		}
		io_start_id += sst_inst->io_dies[i];
	}

	return -EINVAL;
}
526 
527 /*
528  * Map a package and power_domain id to SST information structure unique for a power_domain.
529  * The caller should call under isst_tpmi_dev_lock.
530  */
get_instance(int pkg_id,int power_domain_id)531 static struct tpmi_per_power_domain_info *get_instance(int pkg_id, int power_domain_id)
532 {
533 	struct tpmi_per_power_domain_info *power_domain_info;
534 	struct tpmi_sst_struct *sst_inst;
535 	u8 part;
536 
537 	if (!in_range(pkg_id, 0, topology_max_packages()) || pkg_id > isst_common.max_index)
538 		return NULL;
539 
540 	sst_inst = isst_common.sst_inst[pkg_id];
541 	if (!sst_inst)
542 		return NULL;
543 
544 	power_domain_id = map_partition_power_domain_id(sst_inst, power_domain_id, &part);
545 	if (power_domain_id < 0)
546 		return NULL;
547 
548 	power_domain_info = &sst_inst->power_domain_info[part][power_domain_id];
549 
550 	if (power_domain_info && !power_domain_info->sst_base)
551 		return NULL;
552 
553 	return power_domain_info;
554 }
555 
disable_dynamic_sst_features(void)556 static bool disable_dynamic_sst_features(void)
557 {
558 	u64 value;
559 
560 	rdmsrq(MSR_PM_ENABLE, value);
561 	return !(value & 0x1);
562 }
563 
/*
 * Read a bit-field from the SST-CP register bank into @name.
 * The field at bits [start, start + width) of the qword at
 * cp_offset + @offset is extracted and multiplied by @mult_factor
 * (e.g. ratio -> MHz). @name_str only labels the call site and is unused.
 * Requires a "power_domain_info" pointer in scope at the call site.
 */
#define _read_cp_info(name_str, name, offset, start, width, mult_factor)\
{\
	u64 val, mask;\
	\
	val = readq(power_domain_info->sst_base + power_domain_info->sst_header.cp_offset +\
			(offset));\
	mask = GENMASK_ULL((start + width - 1), start);\
	val &= mask; \
	val >>= start;\
	name = (val * mult_factor);\
}
575 
/*
 * Read-modify-write a bit-field in the SST-CP register bank.
 * @name divided by @div_factor (inverse of the read-side multiply) is
 * written to bits [start, start + width) of the qword at cp_offset +
 * @offset. @name_str only labels the call site and is unused.
 * Requires a "power_domain_info" pointer in scope at the call site.
 */
#define _write_cp_info(name_str, name, offset, start, width, div_factor)\
{\
	u64 val, mask;\
	\
	val = readq(power_domain_info->sst_base +\
		    power_domain_info->sst_header.cp_offset + (offset));\
	mask = GENMASK_ULL((start + width - 1), start);\
	val &= ~mask;\
	val |= (name / div_factor) << start;\
	writeq(val, power_domain_info->sst_base + power_domain_info->sst_header.cp_offset +\
		(offset));\
}
588 
589 #define	SST_CP_CONTROL_OFFSET	8
590 #define	SST_CP_STATUS_OFFSET	16
591 
592 #define SST_CP_ENABLE_START		0
593 #define SST_CP_ENABLE_WIDTH		1
594 
595 #define SST_CP_PRIORITY_TYPE_START	1
596 #define SST_CP_PRIORITY_TYPE_WIDTH	1
597 
/*
 * IOCTL handler: get or set SST-CP (core power) enable state and priority
 * type for one power domain. The user struct selects the domain via
 * (socket_id, power_domain_id) and the direction via get_set.
 */
static long isst_if_core_power_state(void __user *argp)
{
	struct tpmi_per_power_domain_info *power_domain_info;
	struct isst_core_power core_power;

	if (copy_from_user(&core_power, argp, sizeof(core_power)))
		return -EFAULT;

	/*
	 * Reject writes while dynamic SST features are disabled (MSR_PM_ENABLE
	 * bit 0 clear). NOTE(review): -EFAULT is an odd errno for this
	 * condition (-EPERM reads better), but existing userspace may depend
	 * on it — confirm before changing.
	 */
	if (core_power.get_set && disable_dynamic_sst_features())
		return -EFAULT;

	power_domain_info = get_instance(core_power.socket_id, core_power.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (core_power.get_set) {
		/* set: write enable and priority type to the CP control register */
		_write_cp_info("cp_enable", core_power.enable, SST_CP_CONTROL_OFFSET,
			       SST_CP_ENABLE_START, SST_CP_ENABLE_WIDTH, SST_MUL_FACTOR_NONE)
		_write_cp_info("cp_prio_type", core_power.priority_type, SST_CP_CONTROL_OFFSET,
			       SST_CP_PRIORITY_TYPE_START, SST_CP_PRIORITY_TYPE_WIDTH,
			       SST_MUL_FACTOR_NONE)
	} else {
		/* get: read the same fields back from the CP status register */
		_read_cp_info("cp_enable", core_power.enable, SST_CP_STATUS_OFFSET,
			      SST_CP_ENABLE_START, SST_CP_ENABLE_WIDTH, SST_MUL_FACTOR_NONE)
		_read_cp_info("cp_prio_type", core_power.priority_type, SST_CP_STATUS_OFFSET,
			      SST_CP_PRIORITY_TYPE_START, SST_CP_PRIORITY_TYPE_WIDTH,
			      SST_MUL_FACTOR_NONE)
		/* cap_mask bit 0 = SST-CP supported */
		core_power.supported = !!(power_domain_info->sst_header.cap_mask & BIT(0));
		if (copy_to_user(argp, &core_power, sizeof(core_power)))
			return -EFAULT;
	}

	return 0;
}
633 
634 #define SST_CLOS_CONFIG_0_OFFSET	24
635 
636 #define SST_CLOS_CONFIG_PRIO_START	4
637 #define SST_CLOS_CONFIG_PRIO_WIDTH	4
638 
639 #define SST_CLOS_CONFIG_MIN_START	8
640 #define SST_CLOS_CONFIG_MIN_WIDTH	8
641 
642 #define SST_CLOS_CONFIG_MAX_START	16
643 #define SST_CLOS_CONFIG_MAX_WIDTH	8
644 
/*
 * IOCTL handler: get or set per-CLOS parameters (min/max frequency and
 * proportional priority) for one power domain. Each CLOS has its own
 * 8-byte config register starting at SST_CLOS_CONFIG_0_OFFSET.
 */
static long isst_if_clos_param(void __user *argp)
{
	struct tpmi_per_power_domain_info *power_domain_info;
	struct isst_clos_param clos_param;

	if (copy_from_user(&clos_param, argp, sizeof(clos_param)))
		return -EFAULT;

	power_domain_info = get_instance(clos_param.socket_id, clos_param.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (clos_param.get_set) {
		/* Writes are rejected when TPMI marks this domain read-only */
		if (power_domain_info->write_blocked)
			return -EPERM;

		/* MHz values are stored as 100MHz ratios (SST_MUL_FACTOR_FREQ) */
		_write_cp_info("clos.min_freq", clos_param.min_freq_mhz,
			       (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
			       SST_CLOS_CONFIG_MIN_START, SST_CLOS_CONFIG_MIN_WIDTH,
			       SST_MUL_FACTOR_FREQ);
		_write_cp_info("clos.max_freq", clos_param.max_freq_mhz,
			       (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
			       SST_CLOS_CONFIG_MAX_START, SST_CLOS_CONFIG_MAX_WIDTH,
			       SST_MUL_FACTOR_FREQ);
		_write_cp_info("clos.prio", clos_param.prop_prio,
			       (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
			       SST_CLOS_CONFIG_PRIO_START, SST_CLOS_CONFIG_PRIO_WIDTH,
			       SST_MUL_FACTOR_NONE);
	} else {
		/* get */
		_read_cp_info("clos.min_freq", clos_param.min_freq_mhz,
				(SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
				SST_CLOS_CONFIG_MIN_START, SST_CLOS_CONFIG_MIN_WIDTH,
				SST_MUL_FACTOR_FREQ)
		_read_cp_info("clos.max_freq", clos_param.max_freq_mhz,
				(SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
				SST_CLOS_CONFIG_MAX_START, SST_CLOS_CONFIG_MAX_WIDTH,
				SST_MUL_FACTOR_FREQ)
		_read_cp_info("clos.prio", clos_param.prop_prio,
				(SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
				SST_CLOS_CONFIG_PRIO_START, SST_CLOS_CONFIG_PRIO_WIDTH,
				SST_MUL_FACTOR_NONE)

		if (copy_to_user(argp, &clos_param, sizeof(clos_param)))
			return -EFAULT;
	}

	return 0;
}
694 
695 #define SST_CLOS_ASSOC_0_OFFSET		56
696 #define SST_CLOS_ASSOC_CPUS_PER_REG	16
697 #define SST_CLOS_ASSOC_BITS_PER_CPU	4
698 
/*
 * IOCTL handler: get or set the CLOS association for a list of CPUs.
 * The association registers pack SST_CLOS_ASSOC_BITS_PER_CPU bits per
 * punit CPU, SST_CLOS_ASSOC_CPUS_PER_REG CPUs per 8-byte register,
 * starting at SST_CLOS_ASSOC_0_OFFSET.
 *
 * Return: 0 on success, -EFAULT/-EINVAL/-EPERM/-EOPNOTSUPP on failure.
 */
static long isst_if_clos_assoc(void __user *argp)
{
	struct isst_if_clos_assoc_cmds assoc_cmds;
	unsigned char __user *ptr;
	int i;

	/* Each multi command has u16 command count as the first field */
	if (copy_from_user(&assoc_cmds, argp, sizeof(assoc_cmds)))
		return -EFAULT;

	if (!assoc_cmds.cmd_count || assoc_cmds.cmd_count > ISST_IF_CMD_LIMIT)
		return -EINVAL;

	ptr = argp + offsetof(struct isst_if_clos_assoc_cmds, assoc_info);
	for (i = 0; i < assoc_cmds.cmd_count; ++i) {
		struct tpmi_per_power_domain_info *power_domain_info;
		struct isst_if_clos_assoc clos_assoc;
		int punit_id, punit_cpu_no, pkg_id;
		struct tpmi_sst_struct *sst_inst;
		int offset, shift, cpu;
		u64 val, mask, clos;
		u8 part;

		if (copy_from_user(&clos_assoc, ptr, sizeof(clos_assoc)))
			return -EFAULT;

		/*
		 * Valid package ids are [0, topology_max_packages()).
		 * ">" here was an off-by-one that allowed an out of bounds
		 * index into isst_common.sst_inst below.
		 */
		if (clos_assoc.socket_id >= topology_max_packages())
			return -EINVAL;

		cpu = clos_assoc.logical_cpu;
		clos = clos_assoc.clos;

		/* Only punit-numbered CPUs are supported by this interface */
		if (assoc_cmds.punit_cpu_map)
			punit_cpu_no = cpu;
		else
			return -EOPNOTSUPP;

		if (punit_cpu_no < 0)
			return -EINVAL;

		punit_id = clos_assoc.power_domain_id;
		pkg_id = clos_assoc.socket_id;

		sst_inst = isst_common.sst_inst[pkg_id];
		/* The package instance may not be registered (or was unbound) */
		if (!sst_inst)
			return -EINVAL;

		punit_id = map_partition_power_domain_id(sst_inst, punit_id, &part);
		if (punit_id < 0)
			return -EINVAL;

		power_domain_info = &sst_inst->power_domain_info[part][punit_id];

		if (assoc_cmds.get_set && power_domain_info->write_blocked)
			return -EPERM;

		/* Locate the register and bit position for this punit CPU */
		offset = SST_CLOS_ASSOC_0_OFFSET +
				(punit_cpu_no / SST_CLOS_ASSOC_CPUS_PER_REG) * SST_REG_SIZE;
		shift = punit_cpu_no % SST_CLOS_ASSOC_CPUS_PER_REG;
		shift *= SST_CLOS_ASSOC_BITS_PER_CPU;

		val = readq(power_domain_info->sst_base +
				power_domain_info->sst_header.cp_offset + offset);
		if (assoc_cmds.get_set) {
			/* Replace this CPU's field with the requested CLOS */
			mask = GENMASK_ULL((shift + SST_CLOS_ASSOC_BITS_PER_CPU - 1), shift);
			val &= ~mask;
			val |= (clos << shift);
			writeq(val, power_domain_info->sst_base +
					power_domain_info->sst_header.cp_offset + offset);
		} else {
			val >>= shift;
			clos_assoc.clos = val & GENMASK(SST_CLOS_ASSOC_BITS_PER_CPU - 1, 0);
			if (copy_to_user(ptr, &clos_assoc, sizeof(clos_assoc)))
				return -EFAULT;
		}

		ptr += sizeof(clos_assoc);
	}

	return 0;
}
778 
/*
 * Read a bit-field from the SST-PP register bank into @name.
 * The field at bits [start, start + width) of the qword at
 * pp_offset + @offset is extracted and multiplied by @mult_factor.
 * @name_str only labels the call site and is unused.
 * Requires a "power_domain_info" pointer in scope at the call site.
 */
#define _read_pp_info(name_str, name, offset, start, width, mult_factor)\
{\
	u64 val, _mask;\
	\
	val = readq(power_domain_info->sst_base + power_domain_info->sst_header.pp_offset +\
		    (offset));\
	_mask = GENMASK_ULL((start + width - 1), start);\
	val &= _mask;\
	val >>= start;\
	name = (val * mult_factor);\
}
790 
/*
 * Read-modify-write a bit-field in the SST-PP register bank.
 * @name divided by @div_factor is written to bits [start, start + width)
 * of the qword at pp_offset + @offset. @name_str only labels the call
 * site and is unused. Requires a "power_domain_info" pointer in scope.
 *
 * Use GENMASK_ULL for the 64-bit mask, matching every sibling macro in
 * this file (plain GENMASK builds an unsigned long mask, which is only
 * incidentally 64 bits wide here).
 */
#define _write_pp_info(name_str, name, offset, start, width, div_factor)\
{\
	u64 val, _mask;\
	\
	val = readq(power_domain_info->sst_base + power_domain_info->sst_header.pp_offset +\
		    (offset));\
	_mask = GENMASK_ULL((start + width - 1), start);\
	val &= ~_mask;\
	val |= (name / div_factor) << start;\
	writeq(val, power_domain_info->sst_base + power_domain_info->sst_header.pp_offset +\
	      (offset));\
}
803 
/*
 * Read a bit-field from the SST-BF register block of a given perf @level.
 * The block lives at the level's mmio offset plus the (qword-unit)
 * bf_offset scaled to bytes. @name_str only labels the call site.
 * Requires a "power_domain_info" pointer in scope at the call site.
 */
#define _read_bf_level_info(name_str, name, level, offset, start, width, mult_factor)\
{\
	u64 val, _mask;\
	\
	val = readq(power_domain_info->sst_base +\
		    power_domain_info->perf_levels[level].mmio_offset +\
		(power_domain_info->feature_offsets.bf_offset * 8) + (offset));\
	_mask = GENMASK_ULL((start + width - 1), start);\
	val &= _mask; \
	val >>= start;\
	name = (val * mult_factor);\
}
816 
/*
 * Read a bit-field from the SST-TF register block of a given perf @level.
 * Same layout logic as _read_bf_level_info(), but using the tf_offset.
 * Requires a "power_domain_info" pointer in scope at the call site.
 */
#define _read_tf_level_info(name_str, name, level, offset, start, width, mult_factor)\
{\
	u64 val, _mask;\
	\
	val = readq(power_domain_info->sst_base +\
		    power_domain_info->perf_levels[level].mmio_offset +\
		(power_domain_info->feature_offsets.tf_offset * 8) + (offset));\
	_mask = GENMASK_ULL((start + width - 1), start);\
	val &= _mask; \
	val >>= start;\
	name = (val * mult_factor);\
}
829 
830 #define SST_PP_STATUS_OFFSET	32
831 
832 #define SST_PP_LEVEL_START	0
833 #define SST_PP_LEVEL_WIDTH	3
834 
835 #define SST_PP_LOCK_START	3
836 #define SST_PP_LOCK_WIDTH	1
837 
838 #define SST_PP_FEATURE_STATE_START	8
839 #define SST_PP_FEATURE_STATE_WIDTH	8
840 
841 #define SST_BF_FEATURE_SUPPORTED_START	12
842 #define SST_BF_FEATURE_SUPPORTED_WIDTH	1
843 
844 #define SST_TF_FEATURE_SUPPORTED_START	12
845 #define SST_TF_FEATURE_SUPPORTED_WIDTH	1
846 
/*
 * IOCTL handler: report SST-PP capabilities for one power domain: the
 * max/current level, level enable mask, lock state, feature state, and
 * per-level SST-BF/SST-TF support masks.
 *
 * Return: 0 on success, -EFAULT on copy failure, -EINVAL for a bad domain.
 */
static int isst_if_get_perf_level(void __user *argp)
{
	struct isst_perf_level_info perf_level;
	struct tpmi_per_power_domain_info *power_domain_info;
	unsigned long level_mask;
	u8 level, support;

	if (copy_from_user(&perf_level, argp, sizeof(perf_level)))
		return -EFAULT;

	power_domain_info = get_instance(perf_level.socket_id, perf_level.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	perf_level.max_level = power_domain_info->max_level;
	perf_level.level_mask = power_domain_info->pp_header.level_en_mask;
	perf_level.feature_rev = power_domain_info->pp_header.feature_rev;
	_read_pp_info("current_level", perf_level.current_level, SST_PP_STATUS_OFFSET,
		      SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE)
	/*
	 * The lock field is a single bit: use SST_PP_LOCK_WIDTH. The previous
	 * SST_PP_LEVEL_WIDTH (3) also captured error_type bits 4-5 into
	 * perf_level.locked.
	 */
	_read_pp_info("locked", perf_level.locked, SST_PP_STATUS_OFFSET,
		      SST_PP_LOCK_START, SST_PP_LOCK_WIDTH, SST_MUL_FACTOR_NONE)
	_read_pp_info("feature_state", perf_level.feature_state, SST_PP_STATUS_OFFSET,
		      SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH, SST_MUL_FACTOR_NONE)
	/* cap_mask bit 1 = SST-PP enabled */
	perf_level.enabled = !!(power_domain_info->sst_header.cap_mask & BIT(1));

	level_mask = perf_level.level_mask;
	perf_level.sst_bf_support = 0;
	for_each_set_bit(level, &level_mask, BITS_PER_BYTE) {
		/*
		 * Read BF support for a level. Read output is updated
		 * to "support" variable by the below macro.
		 */
		_read_bf_level_info("bf_support", support, level, 0, SST_BF_FEATURE_SUPPORTED_START,
				    SST_BF_FEATURE_SUPPORTED_WIDTH, SST_MUL_FACTOR_NONE);

		/* If supported set the bit for the level */
		if (support)
			perf_level.sst_bf_support |= BIT(level);
	}

	perf_level.sst_tf_support = 0;
	for_each_set_bit(level, &level_mask, BITS_PER_BYTE) {
		/*
		 * Read TF support for a level. Read output is updated
		 * to "support" variable by the below macro.
		 */
		_read_tf_level_info("tf_support", support, level, 0, SST_TF_FEATURE_SUPPORTED_START,
				    SST_TF_FEATURE_SUPPORTED_WIDTH, SST_MUL_FACTOR_NONE);

		/* If supported set the bit for the level */
		if (support)
			perf_level.sst_tf_support |= BIT(level);
	}

	if (copy_to_user(argp, &perf_level, sizeof(perf_level)))
		return -EFAULT;

	return 0;
}
906 
/* Byte offset of the perf-profile control register from pp_offset */
#define SST_PP_CONTROL_OFFSET		24
/* Delay per poll and poll count while waiting for FW to switch level */
#define SST_PP_LEVEL_CHANGE_TIME_MS	5
#define SST_PP_LEVEL_CHANGE_RETRY_COUNT	3
910 
/*
 * Handle ISST_IF_PERF_SET_LEVEL: request a switch to a new SST-PP
 * performance level and poll (with retries) until firmware applies it.
 *
 * Returns 0 on success; -EFAULT when dynamic SST features are
 * disabled, the user copy fails or the level change never takes
 * effect; -EINVAL for an unknown domain, a level not in the allowed
 * mask, or a request for the already-active level; -EPERM when
 * firmware has blocked writes.
 */
static int isst_if_set_perf_level(void __user *argp)
{
	struct isst_perf_level_control perf_level;
	struct tpmi_per_power_domain_info *power_domain_info;
	int level, retry = 0;

	if (disable_dynamic_sst_features())
		return -EFAULT;

	if (copy_from_user(&perf_level, argp, sizeof(perf_level)))
		return -EFAULT;

	power_domain_info = get_instance(perf_level.socket_id, perf_level.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (power_domain_info->write_blocked)
		return -EPERM;

	/* Only levels advertised in allowed_level_mask may be selected */
	if (!(power_domain_info->pp_header.allowed_level_mask & BIT(perf_level.level)))
		return -EINVAL;

	_read_pp_info("current_level", level, SST_PP_STATUS_OFFSET,
		      SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE)

	/* If the requested new level is same as the current level, reject */
	if (perf_level.level == level)
		return -EINVAL;

	/* Kick off the level change via the PP control register */
	_write_pp_info("perf_level", perf_level.level, SST_PP_CONTROL_OFFSET,
		       SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE)

	/* It is possible that firmware is busy (although unlikely), so retry */
	do {
		/* Give time to FW to process */
		msleep(SST_PP_LEVEL_CHANGE_TIME_MS);

		_read_pp_info("current_level", level, SST_PP_STATUS_OFFSET,
			      SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE)

		/* Check if the new level is active */
		if (perf_level.level == level)
			break;

	} while (retry++ < SST_PP_LEVEL_CHANGE_RETRY_COUNT);

	/* If the level change didn't happen, return fault */
	if (perf_level.level != level)
		return -EFAULT;

	/* Reset the feature state on level change */
	_write_pp_info("perf_feature", 0, SST_PP_CONTROL_OFFSET,
		       SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH,
		       SST_MUL_FACTOR_NONE)

	/* Give time to FW to process */
	msleep(SST_PP_LEVEL_CHANGE_TIME_MS);

	return 0;
}
971 
/*
 * Handle ISST_IF_PERF_SET_FEATURE: write the SST feature enable bits
 * (BF/TF state field) of the PP control register for a power domain.
 *
 * Returns 0 on success, -EFAULT when dynamic SST features are disabled
 * or the user copy fails, -EINVAL for an unknown domain, -EPERM when
 * firmware has blocked writes.
 */
static int isst_if_set_perf_feature(void __user *argp)
{
	struct isst_perf_feature_control perf_feature;
	struct tpmi_per_power_domain_info *power_domain_info;

	if (disable_dynamic_sst_features())
		return -EFAULT;

	if (copy_from_user(&perf_feature, argp, sizeof(perf_feature)))
		return -EFAULT;

	power_domain_info = get_instance(perf_feature.socket_id, perf_feature.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (power_domain_info->write_blocked)
		return -EPERM;

	_write_pp_info("perf_feature", perf_feature.feature, SST_PP_CONTROL_OFFSET,
		       SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH,
		       SST_MUL_FACTOR_NONE)

	return 0;
}
996 
/*
 * Read one bit-field from the perf-profile information registers of
 * performance level "level" and store it in "name". Same contract as
 * _read_bf_level_info(): requires a local "power_domain_info" in
 * scope; pp_offset is in 8-byte register units; "name_str" is unused.
 */
#define _read_pp_level_info(name_str, name, level, offset, start, width, mult_factor)\
{\
	u64 val, _mask;\
	\
	val = readq(power_domain_info->sst_base +\
		    power_domain_info->perf_levels[level].mmio_offset +\
		(power_domain_info->feature_offsets.pp_offset * 8) + (offset));\
	_mask = GENMASK_ULL((start + width - 1), start);\
	val &= _mask; \
	val >>= start;\
	name = (val * mult_factor);\
}
1009 
/* Byte offsets of the per-level perf-profile information registers */
#define SST_PP_INFO_0_OFFSET	0
#define SST_PP_INFO_1_OFFSET	8
#define SST_PP_INFO_2_OFFSET	16
#define SST_PP_INFO_3_OFFSET	24

/* SST_PP_INFO_4_OFFSET to SST_PP_INFO_9_OFFSET are trl levels */
#define SST_PP_INFO_4_OFFSET	32

#define SST_PP_INFO_10_OFFSET	80
#define SST_PP_INFO_11_OFFSET	88
#define SST_PP_INFO_12_OFFSET	96

/* INFO_0: P1 (base) ratios per instruction class */
#define SST_PP_P1_SSE_START	0
#define SST_PP_P1_SSE_WIDTH	8

#define SST_PP_P1_AVX2_START	8
#define SST_PP_P1_AVX2_WIDTH	8

#define SST_PP_P1_AVX512_START	16
#define SST_PP_P1_AVX512_WIDTH	8

#define SST_PP_P1_AMX_START	24
#define SST_PP_P1_AMX_WIDTH	8

/* INFO_1: TDP (1/8 W units), prochot temperature, memory freq, cooling */
#define SST_PP_TDP_START	32
#define SST_PP_TDP_WIDTH	15

#define SST_PP_T_PROCHOT_START	47
#define SST_PP_T_PROCHOT_WIDTH	8

#define SST_PP_MAX_MEMORY_FREQ_START	55
#define SST_PP_MAX_MEMORY_FREQ_WIDTH	7

#define SST_PP_COOLING_TYPE_START	62
#define SST_PP_COOLING_TYPE_WIDTH	2

/* INFO_4..9: turbo ratio limit, one 8-bit ratio per bucket */
#define SST_PP_TRL_0_RATIO_0_START	0
#define SST_PP_TRL_0_RATIO_0_WIDTH	8

/* INFO_10: core count per TRL bucket, 8 bits each */
#define SST_PP_TRL_CORES_BUCKET_0_START	0
#define SST_PP_TRL_CORES_BUCKET_0_WIDTH	8

/* INFO_11: core and (first two) fabric P0/P1/Pn/Pm ratios */
#define SST_PP_CORE_RATIO_P0_START	0
#define SST_PP_CORE_RATIO_P0_WIDTH	8

#define SST_PP_CORE_RATIO_P1_START	8
#define SST_PP_CORE_RATIO_P1_WIDTH	8

#define SST_PP_CORE_RATIO_PN_START	16
#define SST_PP_CORE_RATIO_PN_WIDTH	8

#define SST_PP_CORE_RATIO_PM_START	24
#define SST_PP_CORE_RATIO_PM_WIDTH	8

#define SST_PP_CORE_RATIO_P0_FABRIC_START	32
#define SST_PP_CORE_RATIO_P0_FABRIC_WIDTH	8

#define SST_PP_CORE_RATIO_P1_FABRIC_START	40
#define SST_PP_CORE_RATIO_P1_FABRIC_WIDTH	8

#define SST_PP_CORE_RATIO_PM_FABRIC_START	48
#define SST_PP_CORE_RATIO_PM_FABRIC_WIDTH	8

/* INFO_12: second fabric's ratios (feature_rev >= 2) */
#define SST_PP_CORE_RATIO_P0_FABRIC_1_START	0
#define SST_PP_CORE_RATIO_P0_FABRIC_1_WIDTH	8

#define SST_PP_CORE_RATIO_P1_FABRIC_1_START	8
#define SST_PP_CORE_RATIO_P1_FABRIC_1_WIDTH	8

#define SST_PP_CORE_RATIO_PM_FABRIC_1_START	16
#define SST_PP_CORE_RATIO_PM_FABRIC_1_WIDTH	8
1081 
/*
 * Handle ISST_IF_GET_PERF_LEVEL_INFO: fill in the full set of static
 * attributes of one perf level (ratios, TDP, TRL tables, bucket core
 * counts, fabric frequencies) from the per-level PP info registers.
 *
 * Returns 0 on success, -EFAULT on user copy failure, -EINVAL for an
 * unknown domain or a level that is out of range / not enabled.
 */
static int isst_if_get_perf_level_info(void __user *argp)
{
	struct isst_perf_level_data_info perf_level;
	struct tpmi_per_power_domain_info *power_domain_info;
	int i, j;

	if (copy_from_user(&perf_level, argp, sizeof(perf_level)))
		return -EFAULT;

	power_domain_info = get_instance(perf_level.socket_id, perf_level.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (perf_level.level > power_domain_info->max_level)
		return -EINVAL;

	if (!(power_domain_info->pp_header.level_en_mask & BIT(perf_level.level)))
		return -EINVAL;

	/* tdp_ratio is the same register field as the P1 SSE ratio */
	_read_pp_level_info("tdp_ratio", perf_level.tdp_ratio, perf_level.level,
			    SST_PP_INFO_0_OFFSET, SST_PP_P1_SSE_START, SST_PP_P1_SSE_WIDTH,
			    SST_MUL_FACTOR_NONE)
	_read_pp_level_info("base_freq_mhz", perf_level.base_freq_mhz, perf_level.level,
			    SST_PP_INFO_0_OFFSET, SST_PP_P1_SSE_START, SST_PP_P1_SSE_WIDTH,
			    SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("base_freq_avx2_mhz", perf_level.base_freq_avx2_mhz, perf_level.level,
			    SST_PP_INFO_0_OFFSET, SST_PP_P1_AVX2_START, SST_PP_P1_AVX2_WIDTH,
			    SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("base_freq_avx512_mhz", perf_level.base_freq_avx512_mhz,
			    perf_level.level, SST_PP_INFO_0_OFFSET, SST_PP_P1_AVX512_START,
			    SST_PP_P1_AVX512_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("base_freq_amx_mhz", perf_level.base_freq_amx_mhz, perf_level.level,
			    SST_PP_INFO_0_OFFSET, SST_PP_P1_AMX_START, SST_PP_P1_AMX_WIDTH,
			    SST_MUL_FACTOR_FREQ)

	_read_pp_level_info("thermal_design_power_w", perf_level.thermal_design_power_w,
			    perf_level.level, SST_PP_INFO_1_OFFSET, SST_PP_TDP_START,
			    SST_PP_TDP_WIDTH, SST_MUL_FACTOR_NONE)
	perf_level.thermal_design_power_w /= 8; /* units are in 1/8th watt */
	_read_pp_level_info("tjunction_max_c", perf_level.tjunction_max_c, perf_level.level,
			    SST_PP_INFO_1_OFFSET, SST_PP_T_PROCHOT_START, SST_PP_T_PROCHOT_WIDTH,
			    SST_MUL_FACTOR_NONE)
	_read_pp_level_info("max_memory_freq_mhz", perf_level.max_memory_freq_mhz,
			    perf_level.level, SST_PP_INFO_1_OFFSET, SST_PP_MAX_MEMORY_FREQ_START,
			    SST_PP_MAX_MEMORY_FREQ_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("cooling_type", perf_level.cooling_type, perf_level.level,
			    SST_PP_INFO_1_OFFSET, SST_PP_COOLING_TYPE_START,
			    SST_PP_COOLING_TYPE_WIDTH, SST_MUL_FACTOR_NONE)

	/* TRL table: INFO_4..9 hold one 64-bit register per TRL level */
	for (i = 0; i < TRL_MAX_LEVELS; ++i) {
		for (j = 0; j < TRL_MAX_BUCKETS; ++j)
			_read_pp_level_info("trl*_bucket*_freq_mhz",
					    perf_level.trl_freq_mhz[i][j], perf_level.level,
					    SST_PP_INFO_4_OFFSET + (i * SST_PP_TRL_0_RATIO_0_WIDTH),
					    j * SST_PP_TRL_0_RATIO_0_WIDTH,
					    SST_PP_TRL_0_RATIO_0_WIDTH,
					    SST_MUL_FACTOR_FREQ);
	}

	/* Core count per TRL bucket, packed 8 bits each in INFO_10 */
	for (i = 0; i < TRL_MAX_BUCKETS; ++i)
		_read_pp_level_info("bucket*_core_count", perf_level.bucket_core_counts[i],
				    perf_level.level, SST_PP_INFO_10_OFFSET,
				    SST_PP_TRL_CORES_BUCKET_0_WIDTH * i,
				    SST_PP_TRL_CORES_BUCKET_0_WIDTH, SST_MUL_FACTOR_NONE)

	perf_level.max_buckets = TRL_MAX_BUCKETS;
	perf_level.max_trl_levels = TRL_MAX_LEVELS;

	_read_pp_level_info("p0_freq_mhz", perf_level.p0_freq_mhz, perf_level.level,
			    SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_P0_START,
			    SST_PP_CORE_RATIO_P0_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("p1_freq_mhz", perf_level.p1_freq_mhz, perf_level.level,
			    SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_P1_START,
			    SST_PP_CORE_RATIO_P1_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("pn_freq_mhz", perf_level.pn_freq_mhz, perf_level.level,
			    SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_PN_START,
			    SST_PP_CORE_RATIO_PN_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("pm_freq_mhz", perf_level.pm_freq_mhz, perf_level.level,
			    SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_PM_START,
			    SST_PP_CORE_RATIO_PM_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("p0_fabric_freq_mhz", perf_level.p0_fabric_freq_mhz,
			    perf_level.level, SST_PP_INFO_11_OFFSET,
			    SST_PP_CORE_RATIO_P0_FABRIC_START,
			    SST_PP_CORE_RATIO_P0_FABRIC_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("p1_fabric_freq_mhz", perf_level.p1_fabric_freq_mhz,
			    perf_level.level, SST_PP_INFO_11_OFFSET,
			    SST_PP_CORE_RATIO_P1_FABRIC_START,
			    SST_PP_CORE_RATIO_P1_FABRIC_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("pm_fabric_freq_mhz", perf_level.pm_fabric_freq_mhz,
			    perf_level.level, SST_PP_INFO_11_OFFSET,
			    SST_PP_CORE_RATIO_PM_FABRIC_START,
			    SST_PP_CORE_RATIO_PM_FABRIC_WIDTH, SST_MUL_FACTOR_FREQ)

	if (copy_to_user(argp, &perf_level, sizeof(perf_level)))
		return -EFAULT;

	return 0;
}
1180 
/*
 * Handle ISST_IF_GET_PERF_LEVEL_FABRIC_INFO: report per-fabric
 * P0/P1/Pm frequencies for one perf level. Only available when the
 * perf-profile feature revision is >= 2; fabric 0 lives in INFO_11 and
 * fabric 1 in INFO_12.
 *
 * Returns 0 on success, -EFAULT on user copy failure, -EINVAL for an
 * unknown domain, out-of-range/disabled level, or feature_rev < 2.
 */
static int isst_if_get_perf_level_fabric_info(void __user *argp)
{
	struct isst_perf_level_fabric_info perf_level_fabric;
	struct tpmi_per_power_domain_info *power_domain_info;
	int start = SST_PP_CORE_RATIO_P0_FABRIC_START;
	int width = SST_PP_CORE_RATIO_P0_FABRIC_WIDTH;
	int offset = SST_PP_INFO_11_OFFSET;
	int i;

	if (copy_from_user(&perf_level_fabric, argp, sizeof(perf_level_fabric)))
		return -EFAULT;

	power_domain_info = get_instance(perf_level_fabric.socket_id,
					 perf_level_fabric.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (perf_level_fabric.level > power_domain_info->max_level)
		return -EINVAL;

	if (power_domain_info->pp_header.feature_rev < 2)
		return -EINVAL;

	if (!(power_domain_info->pp_header.level_en_mask & BIT(perf_level_fabric.level)))
		return -EINVAL;

	/* For revision 2, maximum number of fabrics is 2 */
	perf_level_fabric.max_fabrics = 2;

	for (i = 0; i < perf_level_fabric.max_fabrics; i++) {
		/* P0, P1 and Pm fields are consecutive "width"-bit fields */
		_read_pp_level_info("p0_fabric_freq_mhz", perf_level_fabric.p0_fabric_freq_mhz[i],
				    perf_level_fabric.level, offset, start, width,
				    SST_MUL_FACTOR_FREQ)
		start += width;

		_read_pp_level_info("p1_fabric_freq_mhz", perf_level_fabric.p1_fabric_freq_mhz[i],
				    perf_level_fabric.level, offset, start, width,
				    SST_MUL_FACTOR_FREQ)
		start += width;

		_read_pp_level_info("pm_fabric_freq_mhz", perf_level_fabric.pm_fabric_freq_mhz[i],
				    perf_level_fabric.level, offset, start, width,
				    SST_MUL_FACTOR_FREQ)
		/* Second iteration reads fabric 1 from the INFO_12 register */
		offset = SST_PP_INFO_12_OFFSET;
		start = SST_PP_CORE_RATIO_P0_FABRIC_1_START;
	}

	if (copy_to_user(argp, &perf_level_fabric, sizeof(perf_level_fabric)))
		return -EFAULT;

	return 0;
}
1233 
/* INFO_2/INFO_3 fields: fused/resolved core counts and resolved core mask */
#define SST_PP_FUSED_CORE_COUNT_START	0
#define SST_PP_FUSED_CORE_COUNT_WIDTH	8

#define SST_PP_RSLVD_CORE_COUNT_START	8
#define SST_PP_RSLVD_CORE_COUNT_WIDTH	8

#define SST_PP_RSLVD_CORE_MASK_START	0
#define SST_PP_RSLVD_CORE_MASK_WIDTH	64
1242 
/*
 * Handle ISST_IF_GET_PERF_LEVEL_CPU_MASK: return the resolved punit
 * core mask for a perf level. Only punit CPU numbering is supported
 * (punit_cpu_map must be set by the caller); conversion to Linux CPU
 * numbering is not done here.
 *
 * Returns 0 on success, -EFAULT on user copy failure, -EINVAL for an
 * unknown domain, -EOPNOTSUPP when punit_cpu_map is not requested.
 */
static int isst_if_get_perf_level_mask(void __user *argp)
{
	/*
	 * static keeps the struct off the kernel stack; safe because all
	 * callers are serialized by isst_tpmi_dev_lock in the ioctl path.
	 */
	static struct isst_perf_level_cpu_mask cpumask;
	struct tpmi_per_power_domain_info *power_domain_info;
	u64 mask;

	if (copy_from_user(&cpumask, argp, sizeof(cpumask)))
		return -EFAULT;

	power_domain_info = get_instance(cpumask.socket_id, cpumask.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	_read_pp_level_info("mask", mask, cpumask.level, SST_PP_INFO_2_OFFSET,
			    SST_PP_RSLVD_CORE_MASK_START, SST_PP_RSLVD_CORE_MASK_WIDTH,
			    SST_MUL_FACTOR_NONE)

	cpumask.mask = mask;

	if (!cpumask.punit_cpu_map)
		return -EOPNOTSUPP;

	if (copy_to_user(argp, &cpumask, sizeof(cpumask)))
		return -EFAULT;

	return 0;
}
1270 
/* Byte offsets and bit-fields of the SST-BF information registers */
#define SST_BF_INFO_0_OFFSET	0
#define SST_BF_INFO_1_OFFSET	8

/* Base frequency ratio of high-priority cores */
#define SST_BF_P1_HIGH_START	13
#define SST_BF_P1_HIGH_WIDTH	8

/* Base frequency ratio of low-priority cores */
#define SST_BF_P1_LOW_START	21
#define SST_BF_P1_LOW_WIDTH	8

#define SST_BF_T_PROHOT_START	38
#define SST_BF_T_PROHOT_WIDTH	8

/* TDP in 1/8 W units */
#define SST_BF_TDP_START	46
#define SST_BF_TDP_WIDTH	15
1285 
/*
 * Handle ISST_IF_GET_BASE_FREQ_INFO: report SST-BF attributes (high
 * and low base frequencies, Tj-max, TDP) for one perf level.
 *
 * Returns 0 on success, -EFAULT on user copy failure, -EINVAL for an
 * unknown domain or out-of-range level.
 */
static int isst_if_get_base_freq_info(void __user *argp)
{
	/* static: off-stack; serialized by isst_tpmi_dev_lock in the ioctl */
	static struct isst_base_freq_info base_freq;
	struct tpmi_per_power_domain_info *power_domain_info;

	if (copy_from_user(&base_freq, argp, sizeof(base_freq)))
		return -EFAULT;

	power_domain_info = get_instance(base_freq.socket_id, base_freq.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (base_freq.level > power_domain_info->max_level)
		return -EINVAL;

	_read_bf_level_info("p1_high", base_freq.high_base_freq_mhz, base_freq.level,
			    SST_BF_INFO_0_OFFSET, SST_BF_P1_HIGH_START, SST_BF_P1_HIGH_WIDTH,
			    SST_MUL_FACTOR_FREQ)
	_read_bf_level_info("p1_low", base_freq.low_base_freq_mhz, base_freq.level,
			    SST_BF_INFO_0_OFFSET, SST_BF_P1_LOW_START, SST_BF_P1_LOW_WIDTH,
			    SST_MUL_FACTOR_FREQ)
	_read_bf_level_info("BF-TJ", base_freq.tjunction_max_c, base_freq.level,
			    SST_BF_INFO_0_OFFSET, SST_BF_T_PROHOT_START, SST_BF_T_PROHOT_WIDTH,
			    SST_MUL_FACTOR_NONE)
	_read_bf_level_info("BF-tdp", base_freq.thermal_design_power_w, base_freq.level,
			    SST_BF_INFO_0_OFFSET, SST_BF_TDP_START, SST_BF_TDP_WIDTH,
			    SST_MUL_FACTOR_NONE)
	base_freq.thermal_design_power_w /= 8; /*unit = 1/8th watt*/

	if (copy_to_user(argp, &base_freq, sizeof(base_freq)))
		return -EFAULT;

	return 0;
}
1320 
/* BF INFO_1: mask of high-priority (P1-high) cores */
#define P1_HI_CORE_MASK_START	0
#define P1_HI_CORE_MASK_WIDTH	64

/*
 * Handle ISST_IF_GET_BASE_FREQ_CPU_MASK: return the punit mask of
 * high-priority cores for SST-BF at a given level. As with the perf
 * level mask, only punit CPU numbering is supported.
 *
 * Returns 0 on success, -EFAULT on user copy failure, -EINVAL for an
 * unknown domain, -EOPNOTSUPP when punit_cpu_map is not requested.
 */
static int isst_if_get_base_freq_mask(void __user *argp)
{
	/* static: off-stack; serialized by isst_tpmi_dev_lock in the ioctl */
	static struct isst_perf_level_cpu_mask cpumask;
	struct tpmi_per_power_domain_info *power_domain_info;
	u64 mask;

	if (copy_from_user(&cpumask, argp, sizeof(cpumask)))
		return -EFAULT;

	power_domain_info = get_instance(cpumask.socket_id, cpumask.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	_read_bf_level_info("BF-cpumask", mask, cpumask.level, SST_BF_INFO_1_OFFSET,
			    P1_HI_CORE_MASK_START, P1_HI_CORE_MASK_WIDTH,
			    SST_MUL_FACTOR_NONE)

	cpumask.mask = mask;

	if (!cpumask.punit_cpu_map)
		return -EOPNOTSUPP;

	if (copy_to_user(argp, &cpumask, sizeof(cpumask)))
		return -EFAULT;

	return 0;
}
1351 
/*
 * Handle ISST_IF_COUNT_TPMI_INSTANCES: report how many power-domain
 * instances exist on a socket and a bitmask of which of them have a
 * valid (mapped) MMIO base. When no instance is valid the count is
 * reported as zero.
 *
 * Returns 0 on success, -EFAULT on user copy failure, -EINVAL for an
 * out-of-range socket id.
 */
static int isst_if_get_tpmi_instance_count(void __user *argp)
{
	struct isst_tpmi_instance_count tpmi_inst;
	struct tpmi_sst_struct *sst;
	int idx;

	if (copy_from_user(&tpmi_inst, argp, sizeof(tpmi_inst)))
		return -EFAULT;

	if (tpmi_inst.socket_id >= topology_max_packages())
		return -EINVAL;

	sst = isst_common.sst_inst[tpmi_inst.socket_id];

	tpmi_inst.count = isst_instance_count(sst);
	tpmi_inst.valid_mask = 0;

	for (idx = 0; idx < tpmi_inst.count; idx++) {
		struct tpmi_per_power_domain_info *pd_info;
		int pd;
		u8 part;

		/* Translate the flat instance index to partition + domain */
		pd = map_partition_power_domain_id(sst, idx, &part);
		if (pd < 0)
			continue;

		/* An instance is valid only if its MMIO region is mapped */
		pd_info = &sst->power_domain_info[part][pd];
		if (pd_info->sst_base)
			tpmi_inst.valid_mask |= BIT(idx);
	}

	if (!tpmi_inst.valid_mask)
		tpmi_inst.count = 0;

	return copy_to_user(argp, &tpmi_inst, sizeof(tpmi_inst)) ? -EFAULT : 0;
}
1391 
/* Byte offsets and bit-fields of the SST-TF information registers */
#define SST_TF_INFO_0_OFFSET	0
#define SST_TF_INFO_1_OFFSET	8
#define SST_TF_INFO_2_OFFSET	16
#define SST_TF_INFO_8_OFFSET	64
/* INFO_8 (feature_rev >= 2) carries module counts for 3 buckets */
#define SST_TF_INFO_8_BUCKETS	3

#define SST_TF_MAX_LP_CLIP_RATIOS	TRL_MAX_LEVELS

#define SST_TF_FEATURE_REV_START	4
#define SST_TF_FEATURE_REV_WIDTH	8

/* Low-priority clip ratios, one 8-bit field per TRL level */
#define SST_TF_LP_CLIP_RATIO_0_START	16
#define SST_TF_LP_CLIP_RATIO_0_WIDTH	8

#define SST_TF_RATIO_0_START	0
#define SST_TF_RATIO_0_WIDTH	8

/* INFO_1: 8-bit core count per bucket */
#define SST_TF_NUM_CORE_0_START 0
#define SST_TF_NUM_CORE_0_WIDTH 8

/* INFO_8: 16-bit module count per bucket */
#define SST_TF_NUM_MOD_0_START	0
#define SST_TF_NUM_MOD_0_WIDTH	16
1414 
/*
 * Handle ISST_IF_GET_TURBO_FREQ_INFO: report SST-TF attributes for one
 * perf level: low-priority clip frequencies, the TRL frequency table
 * and per-bucket core counts. For feature revision >= 2 the bucket
 * counts come from the INFO_8 module-count register when it is
 * populated; otherwise the legacy INFO_1 core counts are used.
 *
 * Returns 0 on success, -EFAULT on user copy failure, -EINVAL for an
 * unknown domain or out-of-range level.
 */
static int isst_if_get_turbo_freq_info(void __user *argp)
{
	/* static: off-stack; serialized by isst_tpmi_dev_lock in the ioctl */
	static struct isst_turbo_freq_info turbo_freq;
	struct tpmi_per_power_domain_info *power_domain_info;
	u8 feature_rev;
	int i, j;

	if (copy_from_user(&turbo_freq, argp, sizeof(turbo_freq)))
		return -EFAULT;

	power_domain_info = get_instance(turbo_freq.socket_id, turbo_freq.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (turbo_freq.level > power_domain_info->max_level)
		return -EINVAL;

	turbo_freq.max_buckets = TRL_MAX_BUCKETS;
	turbo_freq.max_trl_levels = TRL_MAX_LEVELS;
	turbo_freq.max_clip_freqs = SST_TF_MAX_LP_CLIP_RATIOS;

	_read_tf_level_info("feature_rev", feature_rev, turbo_freq.level,
			    SST_TF_INFO_0_OFFSET, SST_TF_FEATURE_REV_START,
			    SST_TF_FEATURE_REV_WIDTH, SST_MUL_FACTOR_NONE);

	for (i = 0; i < turbo_freq.max_clip_freqs; ++i)
		_read_tf_level_info("lp_clip*", turbo_freq.lp_clip_freq_mhz[i],
				    turbo_freq.level, SST_TF_INFO_0_OFFSET,
				    SST_TF_LP_CLIP_RATIO_0_START +
				    (i * SST_TF_LP_CLIP_RATIO_0_WIDTH),
				    SST_TF_LP_CLIP_RATIO_0_WIDTH, SST_MUL_FACTOR_FREQ)

	/* One 64-bit register per TRL level starting at INFO_2 */
	for (i = 0; i < TRL_MAX_LEVELS; ++i) {
		for (j = 0; j < TRL_MAX_BUCKETS; ++j)
			_read_tf_level_info("cydn*_bucket_*_trl",
					    turbo_freq.trl_freq_mhz[i][j], turbo_freq.level,
					    SST_TF_INFO_2_OFFSET + (i * SST_TF_RATIO_0_WIDTH),
					    j * SST_TF_RATIO_0_WIDTH, SST_TF_RATIO_0_WIDTH,
					    SST_MUL_FACTOR_FREQ)
	}

	if (feature_rev >= 2) {
		bool has_tf_info_8 = false;

		/* Prefer INFO_8 module counts when any are non-zero */
		for (i = 0; i < SST_TF_INFO_8_BUCKETS; ++i) {
			_read_tf_level_info("bucket_*_mod_count", turbo_freq.bucket_core_counts[i],
					    turbo_freq.level, SST_TF_INFO_8_OFFSET,
					    SST_TF_NUM_MOD_0_WIDTH * i, SST_TF_NUM_MOD_0_WIDTH,
					    SST_MUL_FACTOR_NONE)

			if (turbo_freq.bucket_core_counts[i])
				has_tf_info_8 = true;
		}

		if (has_tf_info_8)
			goto done_core_count;
	}

	/* Fallback: legacy per-bucket core counts from INFO_1 */
	for (i = 0; i < TRL_MAX_BUCKETS; ++i)
		_read_tf_level_info("bucket_*_core_count", turbo_freq.bucket_core_counts[i],
				    turbo_freq.level, SST_TF_INFO_1_OFFSET,
				    SST_TF_NUM_CORE_0_WIDTH * i, SST_TF_NUM_CORE_0_WIDTH,
				    SST_MUL_FACTOR_NONE)


done_core_count:

	if (copy_to_user(argp, &turbo_freq, sizeof(turbo_freq)))
		return -EFAULT;

	return 0;
}
1487 
isst_if_def_ioctl(struct file * file,unsigned int cmd,unsigned long arg)1488 static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
1489 			      unsigned long arg)
1490 {
1491 	void __user *argp = (void __user *)arg;
1492 	long ret = -ENOTTY;
1493 
1494 	mutex_lock(&isst_tpmi_dev_lock);
1495 	switch (cmd) {
1496 	case ISST_IF_COUNT_TPMI_INSTANCES:
1497 		ret = isst_if_get_tpmi_instance_count(argp);
1498 		break;
1499 	case ISST_IF_CORE_POWER_STATE:
1500 		ret = isst_if_core_power_state(argp);
1501 		break;
1502 	case ISST_IF_CLOS_PARAM:
1503 		ret = isst_if_clos_param(argp);
1504 		break;
1505 	case ISST_IF_CLOS_ASSOC:
1506 		ret = isst_if_clos_assoc(argp);
1507 		break;
1508 	case ISST_IF_PERF_LEVELS:
1509 		ret = isst_if_get_perf_level(argp);
1510 		break;
1511 	case ISST_IF_PERF_SET_LEVEL:
1512 		ret = isst_if_set_perf_level(argp);
1513 		break;
1514 	case ISST_IF_PERF_SET_FEATURE:
1515 		ret = isst_if_set_perf_feature(argp);
1516 		break;
1517 	case ISST_IF_GET_PERF_LEVEL_INFO:
1518 		ret = isst_if_get_perf_level_info(argp);
1519 		break;
1520 	case ISST_IF_GET_PERF_LEVEL_FABRIC_INFO:
1521 		ret = isst_if_get_perf_level_fabric_info(argp);
1522 		break;
1523 	case ISST_IF_GET_PERF_LEVEL_CPU_MASK:
1524 		ret = isst_if_get_perf_level_mask(argp);
1525 		break;
1526 	case ISST_IF_GET_BASE_FREQ_INFO:
1527 		ret = isst_if_get_base_freq_info(argp);
1528 		break;
1529 	case ISST_IF_GET_BASE_FREQ_CPU_MASK:
1530 		ret = isst_if_get_base_freq_mask(argp);
1531 		break;
1532 	case ISST_IF_GET_TURBO_FREQ_INFO:
1533 		ret = isst_if_get_turbo_freq_info(argp);
1534 		break;
1535 	default:
1536 		break;
1537 	}
1538 	mutex_unlock(&isst_tpmi_dev_lock);
1539 
1540 	return ret;
1541 }
1542 
/* Autosuspend delay — presumably consumed by runtime-PM setup later in this file; not referenced in this section */
#define TPMI_SST_AUTO_SUSPEND_DELAY_MS	2000
1544 
/*
 * tpmi_sst_dev_add() - Enumerate one SST TPMI auxiliary device.
 *
 * Each auxiliary device represents one partition of a package. The
 * per-package tpmi_sst_struct is allocated on first enumeration and
 * shared by both partitions; the per-power-domain array is devm
 * allocated against this partition's device. Domains whose MMIO reads
 * back invalid (sst_main() failure, e.g. 0xFF-filled unpopulated dies)
 * are unmapped and skipped rather than failing the whole device.
 *
 * Return: 0 on success; -ENODEV when reads are blocked or no valid
 * domain was found; -EINVAL for missing/invalid platform info or no
 * resources; -ENOMEM on allocation failure.
 */
int tpmi_sst_dev_add(struct auxiliary_device *auxdev)
{
	struct tpmi_per_power_domain_info *pd_info;
	bool read_blocked = 0, write_blocked = 0;
	struct intel_tpmi_plat_info *plat_info;
	struct device *dev = &auxdev->dev;
	struct tpmi_sst_struct *tpmi_sst;
	u8 i, num_resources, io_die_cnt;
	int ret, pkg = 0, inst = 0;
	bool first_enum = false;
	u16 cdie_mask;
	u8 partition;

	/* Best effort: if status can't be read, assume nothing is blocked */
	ret = tpmi_get_feature_status(auxdev, TPMI_ID_SST, &read_blocked, &write_blocked);
	if (ret)
		dev_info(dev, "Can't read feature status: ignoring read/write blocked status\n");

	if (read_blocked) {
		dev_info(dev, "Firmware has blocked reads, exiting\n");
		return -ENODEV;
	}

	plat_info = tpmi_get_platform_data(auxdev);
	if (!plat_info) {
		dev_err(dev, "No platform info\n");
		return -EINVAL;
	}

	pkg = plat_info->package_id;
	if (pkg >= topology_max_packages()) {
		dev_err(dev, "Invalid package id :%x\n", pkg);
		return -EINVAL;
	}

	partition = plat_info->partition;
	if (partition >= SST_MAX_PARTITIONS) {
		dev_err(&auxdev->dev, "Invalid partition :%x\n", partition);
		return -EINVAL;
	}

	num_resources = tpmi_get_resource_count(auxdev);

	if (!num_resources)
		return -EINVAL;

	mutex_lock(&isst_tpmi_dev_lock);

	if (isst_common.sst_inst[pkg]) {
		/* Other partition of this package already enumerated */
		tpmi_sst = isst_common.sst_inst[pkg];
	} else {
		/*
		 * tpmi_sst instance is for a package. So needs to be
		 * allocated only once for both partitions. We can't use
		 * devm_* allocation here as each partition is a
		 * different device, which can be unbound.
		 */
		tpmi_sst = kzalloc(sizeof(*tpmi_sst), GFP_KERNEL);
		if (!tpmi_sst) {
			ret = -ENOMEM;
			goto unlock_exit;
		}
		first_enum = true;
	}

	ret = 0;

	/* Per-domain array is devm-owned by this partition's device */
	pd_info = devm_kcalloc(dev, num_resources, sizeof(*pd_info), GFP_KERNEL);
	if (!pd_info) {
		ret = -ENOMEM;
		goto unlock_free;
	}

	/* Get the IO die count, if cdie_mask is present */
	if (plat_info->cdie_mask) {
		u8 cdie_range;

		cdie_mask = plat_info->cdie_mask;
		cdie_range = fls(cdie_mask) - ffs(cdie_mask) + 1;
		io_die_cnt = num_resources - cdie_range;
	} else {
		/*
		 * This is a synthetic mask, careful when assuming that
		 * they are compute dies only.
		 */
		cdie_mask = (1 << num_resources) - 1;
		io_die_cnt = 0;
	}

	for (i = 0; i < num_resources; ++i) {
		struct resource *res;

		res = tpmi_get_resource_at_index(auxdev, i);
		if (!res) {
			pd_info[i].sst_base = NULL;
			continue;
		}

		pd_info[i].package_id = pkg;
		pd_info[i].power_domain_id = i;
		pd_info[i].auxdev = auxdev;
		pd_info[i].write_blocked = write_blocked;
		pd_info[i].sst_base = devm_ioremap_resource(dev, res);
		if (IS_ERR(pd_info[i].sst_base)) {
			ret = PTR_ERR(pd_info[i].sst_base);
			goto unlock_free;
		}

		if (sst_main(auxdev, &pd_info[i])) {
			/*
			 * This entry is not valid, hardware can partially
			 * populate dies. In this case MMIO will have 0xFFs.
			 * Also possible some pre-production hardware has
			 * invalid data. But don't fail and continue to use
			 * other dies with valid data.
			 */
			devm_iounmap(dev, pd_info[i].sst_base);
			pd_info[i].sst_base = NULL;
			continue;
		}

		++inst;
	}

	/* At least one valid power domain is required */
	if (!inst) {
		ret = -ENODEV;
		goto unlock_free;
	}

	tpmi_sst->package_id = pkg;

	/* Publish this partition's data in the shared package instance */
	tpmi_sst->power_domain_info[partition] = pd_info;
	tpmi_sst->number_of_power_domains[partition] = num_resources;
	tpmi_sst->cdie_mask[partition] = cdie_mask;
	tpmi_sst->io_dies[partition] = io_die_cnt;
	tpmi_sst->partition_mask |= BIT(partition);
	tpmi_sst->partition_mask_current |= BIT(partition);

	auxiliary_set_drvdata(auxdev, tpmi_sst);

	if (isst_common.max_index < pkg)
		isst_common.max_index = pkg;
	isst_common.sst_inst[pkg] = tpmi_sst;

unlock_free:
	/* Only free the package instance if we allocated it on this call */
	if (ret && first_enum)
		kfree(tpmi_sst);
unlock_exit:
	mutex_unlock(&isst_tpmi_dev_lock);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_add, "INTEL_TPMI_SST");
1697 
tpmi_sst_dev_remove(struct auxiliary_device * auxdev)1698 void tpmi_sst_dev_remove(struct auxiliary_device *auxdev)
1699 {
1700 	struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
1701 	struct intel_tpmi_plat_info *plat_info;
1702 
1703 	plat_info = tpmi_get_platform_data(auxdev);
1704 	if (!plat_info)
1705 		return;
1706 
1707 	mutex_lock(&isst_tpmi_dev_lock);
1708 	tpmi_sst->power_domain_info[plat_info->partition] = NULL;
1709 	tpmi_sst->partition_mask_current &= ~BIT(plat_info->partition);
1710 	/* Free the package instance when the all partitions are removed */
1711 	if (!tpmi_sst->partition_mask_current) {
1712 		isst_common.sst_inst[tpmi_sst->package_id] = NULL;
1713 		kfree(tpmi_sst);
1714 	}
1715 	mutex_unlock(&isst_tpmi_dev_lock);
1716 }
1717 EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_remove, "INTEL_TPMI_SST");
1718 
/*
 * tpmi_sst_dev_suspend() - Save SST state across suspend.
 *
 * Snapshots the CP control register, all CLOS config/association
 * registers and the PP control register for this partition's first
 * power domain, to be restored by tpmi_sst_dev_resume().
 *
 * NOTE(review): power_domain_info for this partition is dereferenced
 * without a NULL check — assumes suspend cannot race with
 * tpmi_sst_dev_remove() for the same device; confirm against the
 * driver core's PM ordering.
 */
void tpmi_sst_dev_suspend(struct auxiliary_device *auxdev)
{
	struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
	struct tpmi_per_power_domain_info *power_domain_info;
	struct intel_tpmi_plat_info *plat_info;
	void __iomem *cp_base;

	plat_info = tpmi_get_platform_data(auxdev);
	if (!plat_info)
		return;

	power_domain_info = tpmi_sst->power_domain_info[plat_info->partition];

	cp_base = power_domain_info->sst_base + power_domain_info->sst_header.cp_offset;
	power_domain_info->saved_sst_cp_control = readq(cp_base + SST_CP_CONTROL_OFFSET);

	memcpy_fromio(power_domain_info->saved_clos_configs, cp_base + SST_CLOS_CONFIG_0_OFFSET,
		      sizeof(power_domain_info->saved_clos_configs));

	memcpy_fromio(power_domain_info->saved_clos_assocs, cp_base + SST_CLOS_ASSOC_0_OFFSET,
		      sizeof(power_domain_info->saved_clos_assocs));

	power_domain_info->saved_pp_control = readq(power_domain_info->sst_base +
						    power_domain_info->sst_header.pp_offset +
						    SST_PP_CONTROL_OFFSET);
}
EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_suspend, "INTEL_TPMI_SST");
1746 
/*
 * tpmi_sst_dev_resume() - Restore SST state after resume.
 *
 * Writes back the CP control, CLOS config/association and PP control
 * register values saved by tpmi_sst_dev_suspend(), mirroring its
 * register layout exactly (including the unchecked power_domain_info
 * dereference — see the review note there).
 */
void tpmi_sst_dev_resume(struct auxiliary_device *auxdev)
{
	struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
	struct tpmi_per_power_domain_info *power_domain_info;
	struct intel_tpmi_plat_info *plat_info;
	void __iomem *cp_base;

	plat_info = tpmi_get_platform_data(auxdev);
	if (!plat_info)
		return;

	power_domain_info = tpmi_sst->power_domain_info[plat_info->partition];

	cp_base = power_domain_info->sst_base + power_domain_info->sst_header.cp_offset;
	writeq(power_domain_info->saved_sst_cp_control, cp_base + SST_CP_CONTROL_OFFSET);

	memcpy_toio(cp_base + SST_CLOS_CONFIG_0_OFFSET, power_domain_info->saved_clos_configs,
		    sizeof(power_domain_info->saved_clos_configs));

	memcpy_toio(cp_base + SST_CLOS_ASSOC_0_OFFSET, power_domain_info->saved_clos_assocs,
		    sizeof(power_domain_info->saved_clos_assocs));

	writeq(power_domain_info->saved_pp_control, power_domain_info->sst_base +
				power_domain_info->sst_header.pp_offset + SST_PP_CONTROL_OFFSET);
}
EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_resume, "INTEL_TPMI_SST");
1773 
1774 #define ISST_TPMI_API_VERSION	0x03
1775 
tpmi_sst_init(void)1776 int tpmi_sst_init(void)
1777 {
1778 	struct isst_if_cmd_cb cb;
1779 	int ret = 0;
1780 
1781 	mutex_lock(&isst_tpmi_dev_lock);
1782 
1783 	if (isst_core_usage_count) {
1784 		++isst_core_usage_count;
1785 		goto init_done;
1786 	}
1787 
1788 	isst_common.sst_inst = kcalloc(topology_max_packages(),
1789 				       sizeof(*isst_common.sst_inst),
1790 				       GFP_KERNEL);
1791 	if (!isst_common.sst_inst) {
1792 		ret = -ENOMEM;
1793 		goto init_done;
1794 	}
1795 
1796 	memset(&cb, 0, sizeof(cb));
1797 	cb.cmd_size = sizeof(struct isst_if_io_reg);
1798 	cb.offset = offsetof(struct isst_if_io_regs, io_reg);
1799 	cb.cmd_callback = NULL;
1800 	cb.api_version = ISST_TPMI_API_VERSION;
1801 	cb.def_ioctl = isst_if_def_ioctl;
1802 	cb.owner = THIS_MODULE;
1803 	ret = isst_if_cdev_register(ISST_IF_DEV_TPMI, &cb);
1804 	if (ret)
1805 		kfree(isst_common.sst_inst);
1806 	else
1807 		++isst_core_usage_count;
1808 init_done:
1809 	mutex_unlock(&isst_tpmi_dev_lock);
1810 	return ret;
1811 }
1812 EXPORT_SYMBOL_NS_GPL(tpmi_sst_init, "INTEL_TPMI_SST");
1813 
tpmi_sst_exit(void)1814 void tpmi_sst_exit(void)
1815 {
1816 	mutex_lock(&isst_tpmi_dev_lock);
1817 	if (isst_core_usage_count)
1818 		--isst_core_usage_count;
1819 
1820 	if (!isst_core_usage_count) {
1821 		isst_if_cdev_unregister(ISST_IF_DEV_TPMI);
1822 		kfree(isst_common.sst_inst);
1823 	}
1824 	mutex_unlock(&isst_tpmi_dev_lock);
1825 }
1826 EXPORT_SYMBOL_NS_GPL(tpmi_sst_exit, "INTEL_TPMI_SST");
1827 
/* Symbol namespaces for TPMI register access and power domain mapping */
MODULE_IMPORT_NS("INTEL_TPMI");
MODULE_IMPORT_NS("INTEL_TPMI_POWER_DOMAIN");

MODULE_DESCRIPTION("ISST TPMI interface module");
MODULE_LICENSE("GPL");
1833