xref: /linux/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c (revision 189f164e573e18d9f8876dbd3ad8fcbe11f93037)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * isst_tpmi.c: SST TPMI interface core
4  *
5  * Copyright (c) 2023, Intel Corporation.
6  * All Rights Reserved.
7  *
8  * This information will be useful to understand flows:
9  * In the current generation of platforms, TPMI is supported via OOB
10  * PCI device. This PCI device has one instance per CPU package.
11  * There is a unique TPMI ID for SST. Each TPMI ID also has multiple
12  * entries, representing per power domain information.
13  *
14  * There is one dev file for complete SST information and control same as the
15  * prior generation of hardware. User spaces don't need to know how the
16  * information is presented by the hardware. The TPMI core module implements
17  * the hardware mapping.
18  */
19 
20 #define dev_fmt(fmt) "tpmi_sst: " fmt
21 
22 #include <linux/auxiliary_bus.h>
23 #include <linux/delay.h>
24 #include <linux/intel_tpmi.h>
25 #include <linux/intel_vsec.h>
26 #include <linux/fs.h>
27 #include <linux/io.h>
28 #include <linux/kernel.h>
29 #include <linux/minmax.h>
30 #include <linux/module.h>
31 #include <asm/msr.h>
32 #include <uapi/linux/isst_if.h>
33 
34 #include "isst_tpmi_core.h"
35 #include "isst_if_common.h"
36 
/*
 * Supported SST hardware version by this driver.
 * A different major version is rejected outright; a newer minor version
 * is tolerated with an informational log message (see sst_main()).
 */
#define ISST_MAJOR_VERSION	0
#define ISST_MINOR_VERSION	2

/*
 * Used to indicate if value read from MMIO needs to get multiplied
 * to get to a standard unit or not.
 */
#define SST_MUL_FACTOR_NONE    1

/* Define 100 as a scaling factor frequency ratio to frequency conversion */
#define SST_MUL_FACTOR_FREQ    100

/* All SST regs are 64 bit size */
#define SST_REG_SIZE   8
52 
/**
 * struct sst_header -	SST main header
 * @interface_version:	Version number for this interface
 * @cap_mask:		Bitmask of the supported sub features. 1=the sub feature is enabled.
 *			0=disabled.
 *			Bit[8]= SST_CP enable (1), disable (0)
 *			bit[9]= SST_PP enable (1), disable (0)
 *			other bits are reserved for future use
 * @cp_offset:		Qword (8 bytes) offset to the SST_CP register bank
 * @pp_offset:		Qword (8 bytes) offset to the SST_PP register bank
 * @reserved:		Reserved for future use
 *
 * This register allows SW to discover SST capability and the offsets to SST-CP
 * and SST-PP register banks.
 */
struct sst_header {
	u8 interface_version;
	/*
	 * Register bits 8/9 land in bits 0/1 of this byte: the code tests
	 * cap_mask & BIT(0) for SST-CP and cap_mask & BIT(1) for SST-PP.
	 */
	u8 cap_mask;
	u8 cp_offset;	/* Converted in place from qwords to bytes by sst_main() */
	u8 pp_offset;	/* Converted in place from qwords to bytes by sst_main() */
	u32 reserved;
} __packed;	/* Mirrors one 64-bit register; filled by a single readq() */
75 
/**
 * struct cp_header -	SST-CP (core-power) header
 * @feature_id:		0=SST-CP, 1=SST-PP, 2=SST-BF, 3=SST-TF
 * @feature_rev:	Interface Version number for this SST feature
 * @ratio_unit:		Frequency ratio unit. 00: 100MHz. All others are reserved
 * @reserved:		Reserved for future use
 *
 * This structure is used store SST-CP header. This is packed to the same
 * format as defined in the specifications.
 */
struct cp_header {
	u64 feature_id :4;
	u64 feature_rev :8;
	u64 ratio_unit :2;
	u64 reserved :50;
} __packed;	/* Bitfields of one 64-bit register; filled by a single readq() in sst_main() */
92 
/**
 * struct pp_header -	SST-PP (Perf profile) header
 * @feature_id:		0=SST-CP, 1=SST-PP, 2=SST-BF, 3=SST-TF
 * @feature_rev:	Interface Version number for this SST feature
 * @level_en_mask:	SST-PP level enable/disable fuse mask
 * @allowed_level_mask:	Allowed level mask used for dynamic config level switching
 * @reserved0:		Reserved for future use
 * @ratio_unit:		Frequency ratio unit. 00: 100MHz. All others are reserved
 * @block_size:		Size of PP block in Qword unit (8 bytes)
 * @dynamic_switch:	If set (1), dynamic switching of SST PP is supported
 * @memory_ratio_unit:	Memory Controller frequency ratio unit. 00: 100MHz, others reserved
 * @reserved1:		Reserved for future use
 *
 * This structure is used store SST-PP header. This is packed to the same
 * format as defined in the specifications.
 */
struct pp_header {
	u64 feature_id :4;
	u64 feature_rev :8;
	u64 level_en_mask :8;	/* Highest set bit determines max_level in sst_main() */
	u64 allowed_level_mask :8;
	u64 reserved0 :4;
	u64 ratio_unit :2;
	u64 block_size :8;
	u64 dynamic_switch :1;
	u64 memory_ratio_unit :2;
	u64 reserved1 :19;
} __packed;	/* Bitfields of one 64-bit register; filled by a single readq() in sst_main() */
121 
/**
 * struct feature_offset -	Offsets to SST-PP features
 * @pp_offset:		Qword offset within PP level for the SST_PP register bank
 * @bf_offset:		Qword offset within PP level for the SST_BF register bank
 * @tf_offset:		Qword offset within PP level for the SST_TF register bank
 * @reserved:		Reserved for future use
 *
 * This structure is used store offsets for SST features in the register bank.
 * This is packed to the same format as defined in the specifications.
 */
struct feature_offset {
	/* All offsets are in qwords; users multiply by 8 to get byte offsets */
	u64 pp_offset :8;
	u64 bf_offset :8;
	u64 tf_offset :8;
	u64 reserved :40;
} __packed;
138 
/**
 * struct levels_offset -	Offsets to each SST PP level
 * @sst_pp_level0_offset:	Qword offset to the register block of PP level 0
 * @sst_pp_level1_offset:	Qword offset to the register block of PP level 1
 * @sst_pp_level2_offset:	Qword offset to the register block of PP level 2
 * @sst_pp_level3_offset:	Qword offset to the register block of PP level 3
 * @sst_pp_level4_offset:	Qword offset to the register block of PP level 4
 * @reserved:			Reserved for future use
 *
 * This structure is used store offsets of SST PP levels in the register bank.
 * This is packed to the same format as defined in the specifications.
 */
struct levels_offset {
	/*
	 * Layout reference only: sst_add_perf_profiles() decodes the raw
	 * qword by shifting out these 8-bit fields rather than using this
	 * struct directly.
	 */
	u64 sst_pp_level0_offset :8;
	u64 sst_pp_level1_offset :8;
	u64 sst_pp_level2_offset :8;
	u64 sst_pp_level3_offset :8;
	u64 sst_pp_level4_offset :8;
	u64 reserved :24;
} __packed;
159 
/**
 * struct pp_control_offset -	Offsets for SST PP controls
 * @perf_level:		A SST-PP level that SW intends to switch to
 * @perf_level_lock:	SST-PP level select lock. 0 - unlocked. 1 - locked till next reset
 * @resvd0:		Reserved for future use
 * @current_state:	Bit mask to control the enable(1)/disable(0) state of each feature
 *			of the current PP level, bit 0 = BF, bit 1 = TF, bit 2-7 = reserved
 * @reserved:		Reserved for future use
 *
 * This structure is used store offsets of SST PP controls in the register bank.
 * This is packed to the same format as defined in the specifications.
 */
struct pp_control_offset {
	/* Layout of the 64-bit PP control register */
	u64 perf_level :3;
	u64 perf_level_lock :1;
	u64 resvd0 :4;
	u64 current_state :8;
	u64 reserved :48;
} __packed;
179 
/**
 * struct pp_status_offset -	Offsets for SST PP status fields
 * @sst_pp_level:	Returns the current SST-PP level
 * @sst_pp_lock:	Returns the lock bit setting of perf_level_lock in pp_control_offset
 * @error_type:		Returns last error of SST-PP level change request. 0: no error,
 *			1: level change not allowed, others: reserved
 * @feature_state:	Bit mask to indicate the enable(1)/disable(0) state of each feature of the
 *			current PP level. bit 0 = BF, bit 1 = TF, bit 2-7 reserved
 * @reserved0:		Reserved for future use
 * @feature_error_type: Returns last error of the specific feature. Three error_type bits per
 *			feature. i.e. ERROR_TYPE[2:0] for BF, ERROR_TYPE[5:3] for TF, etc.
 *			0x0: no error, 0x1: The specific feature is not supported by the hardware.
 *			0x2-0x6: Reserved. 0x7: feature state change is not allowed.
 * @reserved1:		Reserved for future use
 *
 * This structure is used store offsets of SST PP status in the register bank.
 * This is packed to the same format as defined in the specifications.
 */
struct pp_status_offset {
	/* Field positions match the SST_PP_*_START/WIDTH defines used below */
	u64 sst_pp_level :3;
	u64 sst_pp_lock :1;
	u64 error_type :4;
	u64 feature_state :8;
	u64 reserved0 :16;
	u64 feature_error_type : 24;
	u64 reserved1 :8;
} __packed;
207 
/**
 * struct perf_level -	Used to store perf level and mmio offset
 * @mmio_offset:	mmio offset for a perf level
 * @level:		perf level for this offset
 *
 * This structure is used store final mmio offset of each perf level from the
 * SST base mmio offset.
 */
struct perf_level {
	int mmio_offset;	/* Byte offset from sst_base to this level's register block */
	int level;
};
220 
/**
 * struct tpmi_per_power_domain_info -	Store per power_domain SST info
 * @package_id:		Package id for this power_domain
 * @power_domain_id:	Power domain id, Each entry from the SST-TPMI instance is a power_domain.
 * @max_level:		Max possible PP level possible for this power_domain
 * @ratio_unit:		Ratio unit for converting to MHz
 * @avx_levels:		Number of AVX levels
 * @pp_block_size:	Block size from PP header
 * @sst_header:		Store SST header for this power_domain
 * @cp_header:		Store SST-CP header for this power_domain
 * @pp_header:		Store SST-PP header for this power_domain
 * @perf_levels:	Pointer to each perf level to map level to mmio offset
 * @feature_offsets:	Store feature offsets for each PP-level
 * @control_offset:	Store the control offset for each PP-level
 * @status_offset:	Store the status offset for each PP-level
 * @sst_base:		Mapped SST base IO memory
 * @auxdev:		Auxiliary device instance enumerated this instance
 * @saved_sst_cp_control: Save SST-CP control configuration to store restore for suspend/resume
 * @saved_clos_configs:	Save SST-CP CLOS configuration to store restore for suspend/resume
 * @saved_clos_assocs:	Save SST-CP CLOS association to store restore for suspend/resume
 * @saved_pp_control:	Save SST-PP control information to store restore for suspend/resume
 * @write_blocked:	Write operation is blocked, so can't change SST state
 *
 * This structure is used store complete SST information for a power_domain. This information
 * is used to read/write request for any SST IOCTL. Each physical CPU package can have multiple
 * power_domains. Each power domain describes its own SST information and has its own controls.
 */
struct tpmi_per_power_domain_info {
	int package_id;
	int power_domain_id;
	int max_level;
	int ratio_unit;
	int avx_levels;
	int pp_block_size;
	struct sst_header sst_header;
	struct cp_header cp_header;
	struct pp_header pp_header;
	struct perf_level *perf_levels;	/* devm-allocated; may be NULL on alloc failure */
	struct feature_offset feature_offsets;
	struct pp_control_offset control_offset;
	struct pp_status_offset status_offset;
	void __iomem *sst_base;	/* NULL when unmapped; checked by get_instance() */
	struct auxiliary_device *auxdev;
	u64 saved_sst_cp_control;
	u64 saved_clos_configs[4];
	u64 saved_clos_assocs[4];
	u64 saved_pp_control;
	/* When true, ioctl "set" paths bail out with -EPERM */
	bool write_blocked;
};
270 
/* Supported maximum partitions */
#define SST_MAX_PARTITIONS	2

/**
 * struct tpmi_sst_struct -	Store sst info for a package
 * @package_id:			Package id for this aux device instance
 * @number_of_power_domains:	Number of power_domains pointed by power_domain_info pointer
 * @power_domain_info:		Pointer to power domains information
 * @cdie_mask:			Mask of compute dies present in a partition from hardware.
 *				This mask is not present in the version 1 information header.
 * @io_dies:			Number of IO dies in a partition. This will be 0 for TPMI
 *				version 1 information header.
 * @partition_mask:		Mask of all partitions.
 * @partition_mask_current:	Current partition mask as some may have been unbound.
 *
 * This structure is used store full SST information for a package.
 * Each package has one or multiple OOB PCI devices. Each package can contain multiple
 * power domains.
 */
struct tpmi_sst_struct {
	int package_id;
	/* All per-partition arrays below are indexed by partition number */
	struct tpmi_per_power_domain_info *power_domain_info[SST_MAX_PARTITIONS];
	u16 cdie_mask[SST_MAX_PARTITIONS];
	u8 number_of_power_domains[SST_MAX_PARTITIONS];
	u8 io_dies[SST_MAX_PARTITIONS];
	/* Contiguous from bit 0, so hweight8() gives the partition count */
	u8 partition_mask;
	u8 partition_mask_current;
};
299 
/**
 * struct tpmi_sst_common_struct -	Store all SST instances
 * @max_index:		Maximum instances currently present
 * @sst_inst:		Pointer to per package instance
 *
 * Stores every SST Package instance.
 */
struct tpmi_sst_common_struct {
	int max_index;	/* Highest valid package index (get_instance() rejects pkg_id > max_index) */
	struct tpmi_sst_struct **sst_inst;	/* Entries may be NULL for unbound packages */
};
311 
/*
 * Each IOCTL request is processed under this lock. Also used to protect
 * registration functions and common data structures.
 */
static DEFINE_MUTEX(isst_tpmi_dev_lock);

/* Usage count to track, number of TPMI SST instances registered to this core. */
static int isst_core_usage_count;

/* Stores complete SST information for every package and power_domain */
static struct tpmi_sst_common_struct isst_common;

/* Number of AVX levels reported in @avx_levels of each power domain */
#define SST_MAX_AVX_LEVELS	3

/* Byte offset within the PP bank of the per-level feature-offset qword */
#define SST_PP_OFFSET_0		8
/* Byte offset within the PP bank of the per-level block-offset qword */
#define SST_PP_OFFSET_1		16
/* Width in bits of each per-level offset field inside that qword */
#define SST_PP_OFFSET_SIZE	8
329 
sst_add_perf_profiles(struct auxiliary_device * auxdev,struct tpmi_per_power_domain_info * pd_info,int levels)330 static int sst_add_perf_profiles(struct auxiliary_device *auxdev,
331 				 struct tpmi_per_power_domain_info *pd_info,
332 				 int levels)
333 {
334 	struct device *dev = &auxdev->dev;
335 	u64 perf_level_offsets;
336 	int i;
337 
338 	pd_info->perf_levels = devm_kcalloc(dev, levels, sizeof(struct perf_level), GFP_KERNEL);
339 	if (!pd_info->perf_levels)
340 		return 0;
341 
342 	pd_info->ratio_unit = pd_info->pp_header.ratio_unit;
343 	pd_info->avx_levels = SST_MAX_AVX_LEVELS;
344 	pd_info->pp_block_size = pd_info->pp_header.block_size;
345 
346 	/* Read PP Offset 0: Get feature offset with PP level */
347 	*((u64 *)&pd_info->feature_offsets) = readq(pd_info->sst_base +
348 						    pd_info->sst_header.pp_offset +
349 						    SST_PP_OFFSET_0);
350 
351 	perf_level_offsets = readq(pd_info->sst_base + pd_info->sst_header.pp_offset +
352 				   SST_PP_OFFSET_1);
353 
354 	for (i = 0; i < levels; ++i) {
355 		u64 offset;
356 
357 		offset = perf_level_offsets & (0xffULL << (i * SST_PP_OFFSET_SIZE));
358 		offset >>= (i * 8);
359 		offset &= 0xff;
360 		offset *= 8; /* Convert to byte from QWORD offset */
361 		pd_info->perf_levels[i].mmio_offset = pd_info->sst_header.pp_offset + offset;
362 	}
363 
364 	return 0;
365 }
366 
sst_main(struct auxiliary_device * auxdev,struct tpmi_per_power_domain_info * pd_info)367 static int sst_main(struct auxiliary_device *auxdev, struct tpmi_per_power_domain_info *pd_info)
368 {
369 	struct device *dev = &auxdev->dev;
370 	int i, mask, levels;
371 
372 	*((u64 *)&pd_info->sst_header) = readq(pd_info->sst_base);
373 	pd_info->sst_header.cp_offset *= 8;
374 	pd_info->sst_header.pp_offset *= 8;
375 
376 	if (pd_info->sst_header.interface_version == TPMI_VERSION_INVALID)
377 		return -ENODEV;
378 
379 	if (TPMI_MAJOR_VERSION(pd_info->sst_header.interface_version) != ISST_MAJOR_VERSION) {
380 		dev_err(dev, "SST: Unsupported major version:%lx\n",
381 			TPMI_MAJOR_VERSION(pd_info->sst_header.interface_version));
382 		return -ENODEV;
383 	}
384 
385 	if (TPMI_MINOR_VERSION(pd_info->sst_header.interface_version) > ISST_MINOR_VERSION)
386 		dev_info(dev, "SST: Ignore: Unsupported minor version:%lx\n",
387 			 TPMI_MINOR_VERSION(pd_info->sst_header.interface_version));
388 
389 	/* Read SST CP Header */
390 	*((u64 *)&pd_info->cp_header) = readq(pd_info->sst_base + pd_info->sst_header.cp_offset);
391 
392 	/* Read PP header */
393 	*((u64 *)&pd_info->pp_header) = readq(pd_info->sst_base + pd_info->sst_header.pp_offset);
394 
395 	mask = 0x01;
396 	levels = 0;
397 	for (i = 0; i < 8; ++i) {
398 		if (pd_info->pp_header.level_en_mask & mask)
399 			levels = i;
400 		mask <<= 1;
401 	}
402 	pd_info->max_level = levels;
403 	sst_add_perf_profiles(auxdev, pd_info, levels + 1);
404 
405 	return 0;
406 }
407 
isst_instance_count(struct tpmi_sst_struct * sst_inst)408 static u8 isst_instance_count(struct tpmi_sst_struct *sst_inst)
409 {
410 	u8 i, max_part, count = 0;
411 
412 	/* Partition mask starts from bit 0 and contains 1s only */
413 	max_part = hweight8(sst_inst->partition_mask);
414 	for (i = 0; i < max_part; i++)
415 		count += sst_inst->number_of_power_domains[i];
416 
417 	return count;
418 }
419 
420 /**
421  * map_cdies() - Map user domain ID to compute domain ID
422  * @sst_inst: TPMI Instance
423  * @id: User domain ID
424  * @partition: Resolved partition
425  *
426  * Helper function to map_partition_power_domain_id() to resolve compute
427  * domain ID and partition. Use hardware provided cdie_mask for a partition
428  * as is to resolve a compute domain ID.
429  *
430  * Return: %-EINVAL on error, otherwise mapped domain ID >= 0.
431  */
map_cdies(struct tpmi_sst_struct * sst_inst,u8 id,u8 * partition)432 static int map_cdies(struct tpmi_sst_struct *sst_inst, u8 id, u8 *partition)
433 {
434 	u8 i, max_part;
435 
436 	max_part = hweight8(sst_inst->partition_mask);
437 	for (i = 0; i < max_part; i++) {
438 		if (!(sst_inst->cdie_mask[i] & BIT(id)))
439 			continue;
440 
441 		*partition = i;
442 		return id - ffs(sst_inst->cdie_mask[i]) + 1;
443 	}
444 
445 	return -EINVAL;
446 }
447 
/**
 * map_partition_power_domain_id() - Map user domain ID to partition domain ID
 * @sst_inst: TPMI Instance
 * @id: User domain ID
 * @partition: Resolved partition
 *
 * In a partitioned system a CPU package has two separate MMIO ranges (Under
 * two PCI devices). But the CPU package compute die/power domain IDs are
 * unique in a package. User space can get compute die/power domain ID from
 * CPUID and MSR 0x54 for a CPU. So, those IDs need to be preserved even if
 * they are present in two different partitions with its own order.
 *
 * For example for command ISST_IF_COUNT_TPMI_INSTANCES, the valid_mask
 * is 111111b for a 4 compute and 2 IO dies system. This is presented as
 * provided by the hardware in a non-partitioned system with the following
 * order:
 *	I1-I0-C3-C2-C1-C0
 * Here: "C": for compute and "I" for IO die.
 * Compute dies are always present first in TPMI instances, as they have
 * to map to the real power domain/die ID of a system. In a non-partitioned
 * system there is no way to identify compute and IO die boundaries from
 * this driver without reading each CPU's mapping.
 *
 * The same order needs to be preserved, even if those compute dies are
 * distributed among multiple partitions. For example:
 * Partition 1 can contain: I1-C1-C0
 * Partition 2 can contain: I2-C3-C2
 *
 * This will require a conversion of user space IDs to the actual index into
 * array of stored power domains for each partition. For the above example
 * this function will return partition and index as follows:
 *
 * =============	=========	=====	========
 * User space ID	Partition	Index	Die type
 * =============	=========	=====	========
 * 0			0		0	Compute
 * 1			0		1	Compute
 * 2			1		0	Compute
 * 3			1		1	Compute
 * 4			0		2	IO
 * 5			1		2	IO
 * =============	=========	=====	========
 *
 * Return: %-EINVAL on error, otherwise mapped domain ID >= 0.
 */
static int map_partition_power_domain_id(struct tpmi_sst_struct *sst_inst, u8 id, u8 *partition)
{
	u8 i, io_start_id, max_part;

	*partition = 0;

	/* If any PCI device for partition is unbound, treat this as failure */
	if (sst_inst->partition_mask != sst_inst->partition_mask_current)
		return -EINVAL;

	max_part = hweight8(sst_inst->partition_mask);

	/*
	 * IO Index begin here. fls() of the last partition's cdie mask is
	 * one past the highest compute die ID, i.e. the first IO die ID.
	 */
	io_start_id = fls(sst_inst->cdie_mask[max_part - 1]);

	/* IDs below the IO range are compute dies; resolve via the cdie masks */
	if (id < io_start_id)
		return map_cdies(sst_inst, id, partition);

	/* Walk partitions until the IO-relative index falls inside one of them */
	for (i = 0; i < max_part; i++) {
		u8 io_id;

		io_id = id - io_start_id;
		if (io_id < sst_inst->io_dies[i]) {
			u8 cdie_range;

			/* IO entries are stored after this partition's compute dies */
			cdie_range = fls(sst_inst->cdie_mask[i]) - ffs(sst_inst->cdie_mask[i]) + 1;
			*partition = i;
			return cdie_range + io_id;
		}
		io_start_id += sst_inst->io_dies[i];
	}

	return -EINVAL;
}
527 
528 /*
529  * Map a package and power_domain id to SST information structure unique for a power_domain.
530  * The caller should call under isst_tpmi_dev_lock.
531  */
get_instance(int pkg_id,int power_domain_id)532 static struct tpmi_per_power_domain_info *get_instance(int pkg_id, int power_domain_id)
533 {
534 	struct tpmi_per_power_domain_info *power_domain_info;
535 	struct tpmi_sst_struct *sst_inst;
536 	u8 part;
537 
538 	if (!in_range(pkg_id, 0, topology_max_packages()) || pkg_id > isst_common.max_index)
539 		return NULL;
540 
541 	sst_inst = isst_common.sst_inst[pkg_id];
542 	if (!sst_inst)
543 		return NULL;
544 
545 	power_domain_id = map_partition_power_domain_id(sst_inst, power_domain_id, &part);
546 	if (power_domain_id < 0)
547 		return NULL;
548 
549 	power_domain_info = &sst_inst->power_domain_info[part][power_domain_id];
550 
551 	if (power_domain_info && !power_domain_info->sst_base)
552 		return NULL;
553 
554 	return power_domain_info;
555 }
556 
disable_dynamic_sst_features(void)557 static bool disable_dynamic_sst_features(void)
558 {
559 	u64 value;
560 
561 	rdmsrq(MSR_PM_ENABLE, value);
562 	return !(value & 0x1);
563 }
564 
/*
 * Read one bit-field from an SST-CP register and assign it to @name.
 * @name_str:    field name, for readability at call sites only (not evaluated)
 * @name:        lvalue receiving (field value * @mult_factor)
 * @offset:      byte offset of the register inside the SST-CP bank
 * @start:       least significant bit of the field
 * @width:       field width in bits
 * @mult_factor: scaling to a standard unit (e.g. ratio -> MHz)
 *
 * Expands to a bare { } block, not do/while(0): several call sites invoke
 * it without a trailing semicolon. Requires a power_domain_info pointer in
 * the enclosing scope.
 */
#define _read_cp_info(name_str, name, offset, start, width, mult_factor)\
{\
	u64 val, mask;\
	\
	val = readq(power_domain_info->sst_base + power_domain_info->sst_header.cp_offset +\
			(offset));\
	mask = GENMASK_ULL((start + width - 1), start);\
	val &= mask; \
	val >>= start;\
	name = (val * mult_factor);\
}
576 
/*
 * Read-modify-write one bit-field of an SST-CP register from @name.
 * @name_str:   field name, for readability at call sites only (not evaluated)
 * @name:       value to program; divided by @div_factor before writing
 * @offset:     byte offset of the register inside the SST-CP bank
 * @start:      least significant bit of the field
 * @width:      field width in bits
 * @div_factor: scaling from a standard unit back to register units
 *
 * Expands to a bare { } block, not do/while(0): several call sites invoke
 * it without a trailing semicolon. Requires a power_domain_info pointer in
 * the enclosing scope.
 */
#define _write_cp_info(name_str, name, offset, start, width, div_factor)\
{\
	u64 val, mask;\
	\
	val = readq(power_domain_info->sst_base +\
		    power_domain_info->sst_header.cp_offset + (offset));\
	mask = GENMASK_ULL((start + width - 1), start);\
	val &= ~mask;\
	val |= (name / div_factor) << start;\
	writeq(val, power_domain_info->sst_base + power_domain_info->sst_header.cp_offset +\
		(offset));\
}
589 
/* Byte offsets of the SST-CP control and status registers in the CP bank */
#define	SST_CP_CONTROL_OFFSET	8
#define	SST_CP_STATUS_OFFSET	16

#define SST_CP_ENABLE_START		0
#define SST_CP_ENABLE_WIDTH		1

#define SST_CP_PRIORITY_TYPE_START	1
#define SST_CP_PRIORITY_TYPE_WIDTH	1

/*
 * Handle the ISST core-power state ioctl: get or set the SST-CP enable
 * and priority-type fields for one power domain. Set requests go to the
 * control register; get requests read the status register.
 * Returns 0 on success or a negative errno.
 */
static long isst_if_core_power_state(void __user *argp)
{
	struct tpmi_per_power_domain_info *power_domain_info;
	struct isst_core_power core_power;

	if (copy_from_user(&core_power, argp, sizeof(core_power)))
		return -EFAULT;

	/*
	 * Refuse set requests while dynamic SST features are disabled.
	 * NOTE(review): -EFAULT is unusual for this condition (vs -ENOTSUPP)
	 * — confirm user space depends on it before changing.
	 */
	if (core_power.get_set && disable_dynamic_sst_features())
		return -EFAULT;

	power_domain_info = get_instance(core_power.socket_id, core_power.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (core_power.get_set) {
		/* Set path: needs write access and CAP_SYS_ADMIN */
		if (power_domain_info->write_blocked || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		_write_cp_info("cp_enable", core_power.enable, SST_CP_CONTROL_OFFSET,
			       SST_CP_ENABLE_START, SST_CP_ENABLE_WIDTH, SST_MUL_FACTOR_NONE)
		_write_cp_info("cp_prio_type", core_power.priority_type, SST_CP_CONTROL_OFFSET,
			       SST_CP_PRIORITY_TYPE_START, SST_CP_PRIORITY_TYPE_WIDTH,
			       SST_MUL_FACTOR_NONE)
	} else {
		/* get */
		_read_cp_info("cp_enable", core_power.enable, SST_CP_STATUS_OFFSET,
			      SST_CP_ENABLE_START, SST_CP_ENABLE_WIDTH, SST_MUL_FACTOR_NONE)
		_read_cp_info("cp_prio_type", core_power.priority_type, SST_CP_STATUS_OFFSET,
			      SST_CP_PRIORITY_TYPE_START, SST_CP_PRIORITY_TYPE_WIDTH,
			      SST_MUL_FACTOR_NONE)
		/* Bit 0 of the main header cap_mask reports SST-CP support */
		core_power.supported = !!(power_domain_info->sst_header.cap_mask & BIT(0));
		if (copy_to_user(argp, &core_power, sizeof(core_power)))
			return -EFAULT;
	}

	return 0;
}
637 
/* Byte offset of the CLOS 0 config register; CLOS n is at 0 + n * SST_REG_SIZE */
#define SST_CLOS_CONFIG_0_OFFSET	24

#define SST_CLOS_CONFIG_PRIO_START	4
#define SST_CLOS_CONFIG_PRIO_WIDTH	4

#define SST_CLOS_CONFIG_MIN_START	8
#define SST_CLOS_CONFIG_MIN_WIDTH	8

#define SST_CLOS_CONFIG_MAX_START	16
#define SST_CLOS_CONFIG_MAX_WIDTH	8

/*
 * Handle the ISST CLOS parameter ioctl: get or set min/max frequency and
 * proportional priority of one CLOS in a power domain. Frequencies are
 * scaled between MHz and ratio units by SST_MUL_FACTOR_FREQ.
 * Returns 0 on success or a negative errno.
 */
static long isst_if_clos_param(void __user *argp)
{
	struct tpmi_per_power_domain_info *power_domain_info;
	struct isst_clos_param clos_param;

	if (copy_from_user(&clos_param, argp, sizeof(clos_param)))
		return -EFAULT;

	power_domain_info = get_instance(clos_param.socket_id, clos_param.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (clos_param.get_set) {
		/* Set path: needs write access and CAP_SYS_ADMIN */
		if (power_domain_info->write_blocked || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		_write_cp_info("clos.min_freq", clos_param.min_freq_mhz,
			       (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
			       SST_CLOS_CONFIG_MIN_START, SST_CLOS_CONFIG_MIN_WIDTH,
			       SST_MUL_FACTOR_FREQ);
		_write_cp_info("clos.max_freq", clos_param.max_freq_mhz,
			       (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
			       SST_CLOS_CONFIG_MAX_START, SST_CLOS_CONFIG_MAX_WIDTH,
			       SST_MUL_FACTOR_FREQ);
		_write_cp_info("clos.prio", clos_param.prop_prio,
			       (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
			       SST_CLOS_CONFIG_PRIO_START, SST_CLOS_CONFIG_PRIO_WIDTH,
			       SST_MUL_FACTOR_NONE);
	} else {
		/* get */
		_read_cp_info("clos.min_freq", clos_param.min_freq_mhz,
				(SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
				SST_CLOS_CONFIG_MIN_START, SST_CLOS_CONFIG_MIN_WIDTH,
				SST_MUL_FACTOR_FREQ)
		_read_cp_info("clos.max_freq", clos_param.max_freq_mhz,
				(SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
				SST_CLOS_CONFIG_MAX_START, SST_CLOS_CONFIG_MAX_WIDTH,
				SST_MUL_FACTOR_FREQ)
		_read_cp_info("clos.prio", clos_param.prop_prio,
				(SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
				SST_CLOS_CONFIG_PRIO_START, SST_CLOS_CONFIG_PRIO_WIDTH,
				SST_MUL_FACTOR_NONE)

		if (copy_to_user(argp, &clos_param, sizeof(clos_param)))
			return -EFAULT;
	}

	return 0;
}
698 
699 #define SST_CLOS_ASSOC_0_OFFSET		56
700 #define SST_CLOS_ASSOC_CPUS_PER_REG	16
701 #define SST_CLOS_ASSOC_BITS_PER_CPU	4
702 
isst_if_clos_assoc(void __user * argp)703 static long isst_if_clos_assoc(void __user *argp)
704 {
705 	struct isst_if_clos_assoc_cmds assoc_cmds;
706 	unsigned char __user *ptr;
707 	int i;
708 
709 	/* Each multi command has u16 command count as the first field */
710 	if (copy_from_user(&assoc_cmds, argp, sizeof(assoc_cmds)))
711 		return -EFAULT;
712 
713 	if (!assoc_cmds.cmd_count || assoc_cmds.cmd_count > ISST_IF_CMD_LIMIT)
714 		return -EINVAL;
715 
716 	ptr = argp + offsetof(struct isst_if_clos_assoc_cmds, assoc_info);
717 	for (i = 0; i < assoc_cmds.cmd_count; ++i) {
718 		struct tpmi_per_power_domain_info *power_domain_info;
719 		struct isst_if_clos_assoc clos_assoc;
720 		int punit_id, punit_cpu_no, pkg_id;
721 		struct tpmi_sst_struct *sst_inst;
722 		int offset, shift, cpu;
723 		u64 val, mask, clos;
724 		u8 part;
725 
726 		if (copy_from_user(&clos_assoc, ptr, sizeof(clos_assoc)))
727 			return -EFAULT;
728 
729 		if (clos_assoc.socket_id > topology_max_packages())
730 			return -EINVAL;
731 
732 		cpu = clos_assoc.logical_cpu;
733 		clos = clos_assoc.clos;
734 
735 		if (assoc_cmds.punit_cpu_map)
736 			punit_cpu_no = cpu;
737 		else
738 			return -EOPNOTSUPP;
739 
740 		if (punit_cpu_no < 0)
741 			return -EINVAL;
742 
743 		punit_id = clos_assoc.power_domain_id;
744 		pkg_id = clos_assoc.socket_id;
745 
746 		sst_inst = isst_common.sst_inst[pkg_id];
747 
748 		punit_id = map_partition_power_domain_id(sst_inst, punit_id, &part);
749 		if (punit_id < 0)
750 			return -EINVAL;
751 
752 		power_domain_info = &sst_inst->power_domain_info[part][punit_id];
753 
754 		if (assoc_cmds.get_set && (power_domain_info->write_blocked ||
755 					   !capable(CAP_SYS_ADMIN)))
756 			return -EPERM;
757 
758 		offset = SST_CLOS_ASSOC_0_OFFSET +
759 				(punit_cpu_no / SST_CLOS_ASSOC_CPUS_PER_REG) * SST_REG_SIZE;
760 		shift = punit_cpu_no % SST_CLOS_ASSOC_CPUS_PER_REG;
761 		shift *= SST_CLOS_ASSOC_BITS_PER_CPU;
762 
763 		val = readq(power_domain_info->sst_base +
764 				power_domain_info->sst_header.cp_offset + offset);
765 		if (assoc_cmds.get_set) {
766 			mask = GENMASK_ULL((shift + SST_CLOS_ASSOC_BITS_PER_CPU - 1), shift);
767 			val &= ~mask;
768 			val |= (clos << shift);
769 			writeq(val, power_domain_info->sst_base +
770 					power_domain_info->sst_header.cp_offset + offset);
771 		} else {
772 			val >>= shift;
773 			clos_assoc.clos = val & GENMASK(SST_CLOS_ASSOC_BITS_PER_CPU - 1, 0);
774 			if (copy_to_user(ptr, &clos_assoc, sizeof(clos_assoc)))
775 				return -EFAULT;
776 		}
777 
778 		ptr += sizeof(clos_assoc);
779 	}
780 
781 	return 0;
782 }
783 
/*
 * Read one bit-field from an SST-PP register and assign it to @name.
 * Same contract as _read_cp_info(), but relative to the PP register bank.
 * Expands to a bare { } block (call sites may omit the trailing semicolon);
 * requires a power_domain_info pointer in the enclosing scope.
 */
#define _read_pp_info(name_str, name, offset, start, width, mult_factor)\
{\
	u64 val, _mask;\
	\
	val = readq(power_domain_info->sst_base + power_domain_info->sst_header.pp_offset +\
		    (offset));\
	_mask = GENMASK_ULL((start + width - 1), start);\
	val &= _mask;\
	val >>= start;\
	name = (val * mult_factor);\
}
795 
/*
 * Read-modify-write one bit-field of an SST-PP register from @name.
 * Same contract as _write_cp_info(), but relative to the PP register bank.
 * Expands to a bare { } block (call sites may omit the trailing semicolon);
 * requires a power_domain_info pointer in the enclosing scope.
 *
 * Uses GENMASK_ULL like every sibling macro here: the plain GENMASK it
 * had produces an unsigned long mask, which would truncate fields above
 * bit 31 on 32-bit builds.
 */
#define _write_pp_info(name_str, name, offset, start, width, div_factor)\
{\
	u64 val, _mask;\
	\
	val = readq(power_domain_info->sst_base + power_domain_info->sst_header.pp_offset +\
		    (offset));\
	_mask = GENMASK_ULL((start + width - 1), start);\
	val &= ~_mask;\
	val |= (name / div_factor) << start;\
	writeq(val, power_domain_info->sst_base + power_domain_info->sst_header.pp_offset +\
	      (offset));\
}
808 
/*
 * Read one bit-field from the SST-BF registers of a given perf @level.
 * bf_offset is a qword offset, hence the * 8 conversion to bytes.
 * Expands to a bare { } block; requires a power_domain_info pointer with a
 * populated perf_levels[] array in the enclosing scope.
 */
#define _read_bf_level_info(name_str, name, level, offset, start, width, mult_factor)\
{\
	u64 val, _mask;\
	\
	val = readq(power_domain_info->sst_base +\
		    power_domain_info->perf_levels[level].mmio_offset +\
		(power_domain_info->feature_offsets.bf_offset * 8) + (offset));\
	_mask = GENMASK_ULL((start + width - 1), start);\
	val &= _mask; \
	val >>= start;\
	name = (val * mult_factor);\
}
821 
/*
 * Read one bit-field from the SST-TF registers of a given perf @level.
 * tf_offset is a qword offset, hence the * 8 conversion to bytes.
 * Expands to a bare { } block; requires a power_domain_info pointer with a
 * populated perf_levels[] array in the enclosing scope.
 */
#define _read_tf_level_info(name_str, name, level, offset, start, width, mult_factor)\
{\
	u64 val, _mask;\
	\
	val = readq(power_domain_info->sst_base +\
		    power_domain_info->perf_levels[level].mmio_offset +\
		(power_domain_info->feature_offsets.tf_offset * 8) + (offset));\
	_mask = GENMASK_ULL((start + width - 1), start);\
	val &= _mask; \
	val >>= start;\
	name = (val * mult_factor);\
}
834 
835 #define SST_PP_STATUS_OFFSET	32
836 
837 #define SST_PP_LEVEL_START	0
838 #define SST_PP_LEVEL_WIDTH	3
839 
840 #define SST_PP_LOCK_START	3
841 #define SST_PP_LOCK_WIDTH	1
842 
843 #define SST_PP_FEATURE_STATE_START	8
844 #define SST_PP_FEATURE_STATE_WIDTH	8
845 
846 #define SST_BF_FEATURE_SUPPORTED_START	12
847 #define SST_BF_FEATURE_SUPPORTED_WIDTH	1
848 
849 #define SST_TF_FEATURE_SUPPORTED_START	12
850 #define SST_TF_FEATURE_SUPPORTED_WIDTH	1
851 
/*
 * isst_if_get_perf_level() - Return SST-PP performance level summary
 *
 * Fills struct isst_perf_level_info for the selected power domain: the
 * maximum level, enabled-level mask, feature revision, current level,
 * lock bit, feature state, and per-level SST-BF/SST-TF support masks,
 * then copies it back to user space.
 *
 * Return: 0 on success, -EFAULT on user copy failure, -EINVAL for an
 * invalid socket/power-domain id.
 */
static int isst_if_get_perf_level(void __user *argp)
{
	struct isst_perf_level_info perf_level;
	struct tpmi_per_power_domain_info *power_domain_info;
	unsigned long level_mask;
	u8 level, support;

	if (copy_from_user(&perf_level, argp, sizeof(perf_level)))
		return -EFAULT;

	power_domain_info = get_instance(perf_level.socket_id, perf_level.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	perf_level.max_level = power_domain_info->max_level;
	perf_level.level_mask = power_domain_info->pp_header.level_en_mask;
	perf_level.feature_rev = power_domain_info->pp_header.feature_rev;
	_read_pp_info("current_level", perf_level.current_level, SST_PP_STATUS_OFFSET,
		      SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE)
	/*
	 * The lock indication is a single bit, so read it with
	 * SST_PP_LOCK_WIDTH. Reading with SST_PP_LEVEL_WIDTH (3) would
	 * also pick up the two bits above the lock bit and report a
	 * bogus lock state.
	 */
	_read_pp_info("locked", perf_level.locked, SST_PP_STATUS_OFFSET,
		      SST_PP_LOCK_START, SST_PP_LOCK_WIDTH, SST_MUL_FACTOR_NONE)
	_read_pp_info("feature_state", perf_level.feature_state, SST_PP_STATUS_OFFSET,
		      SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH, SST_MUL_FACTOR_NONE)
	/* cap_mask BIT(1) flags SST-PP support for this power domain */
	perf_level.enabled = !!(power_domain_info->sst_header.cap_mask & BIT(1));

	level_mask = perf_level.level_mask;
	perf_level.sst_bf_support = 0;
	for_each_set_bit(level, &level_mask, BITS_PER_BYTE) {
		/*
		 * Read BF support for a level. Read output is updated
		 * to "support" variable by the below macro.
		 */
		_read_bf_level_info("bf_support", support, level, 0, SST_BF_FEATURE_SUPPORTED_START,
				    SST_BF_FEATURE_SUPPORTED_WIDTH, SST_MUL_FACTOR_NONE);

		/* If supported set the bit for the level */
		if (support)
			perf_level.sst_bf_support |= BIT(level);
	}

	perf_level.sst_tf_support = 0;
	for_each_set_bit(level, &level_mask, BITS_PER_BYTE) {
		/*
		 * Read TF support for a level. Read output is updated
		 * to "support" variable by the below macro.
		 */
		_read_tf_level_info("tf_support", support, level, 0, SST_TF_FEATURE_SUPPORTED_START,
				    SST_TF_FEATURE_SUPPORTED_WIDTH, SST_MUL_FACTOR_NONE);

		/* If supported set the bit for the level */
		if (support)
			perf_level.sst_tf_support |= BIT(level);
	}

	if (copy_to_user(argp, &perf_level, sizeof(perf_level)))
		return -EFAULT;

	return 0;
}
911 
912 #define SST_PP_CONTROL_OFFSET		24
913 #define SST_PP_LEVEL_CHANGE_TIME_MS	5
914 #define SST_PP_LEVEL_CHANGE_RETRY_COUNT	3
915 
/*
 * isst_if_set_perf_level() - Switch the active SST-PP performance level
 *
 * Writes the user requested level to the SST-PP control register, then
 * polls the status register (sleeping between reads) until the hardware
 * reports the new level as current or the retry budget is exhausted.
 * After a successful switch the feature-state field is reset to 0.
 *
 * Return: 0 on success, -EFAULT on user copy failure, when dynamic SST
 * features are disabled, or when the level change did not take effect;
 * -EINVAL for a bad socket/power-domain id, a disallowed level, or a
 * request for the already-current level; -EPERM when the firmware has
 * blocked writes or the caller lacks CAP_SYS_ADMIN.
 */
static int isst_if_set_perf_level(void __user *argp)
{
	struct isst_perf_level_control perf_level;
	struct tpmi_per_power_domain_info *power_domain_info;
	int level, retry = 0;

	/* Bail out when dynamic SST feature control is disabled */
	if (disable_dynamic_sst_features())
		return -EFAULT;

	if (copy_from_user(&perf_level, argp, sizeof(perf_level)))
		return -EFAULT;

	power_domain_info = get_instance(perf_level.socket_id, perf_level.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (power_domain_info->write_blocked || !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* The requested level must be one the firmware allows */
	if (!(power_domain_info->pp_header.allowed_level_mask & BIT(perf_level.level)))
		return -EINVAL;

	_read_pp_info("current_level", level, SST_PP_STATUS_OFFSET,
		      SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE)

	/* If the requested new level is same as the current level, reject */
	if (perf_level.level == level)
		return -EINVAL;

	_write_pp_info("perf_level", perf_level.level, SST_PP_CONTROL_OFFSET,
		       SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE)

	/* It is possible that firmware is busy (although unlikely), so retry */
	do {
		/* Give time to FW to process */
		msleep(SST_PP_LEVEL_CHANGE_TIME_MS);

		_read_pp_info("current_level", level, SST_PP_STATUS_OFFSET,
			      SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE)

		/* Check if the new level is active */
		if (perf_level.level == level)
			break;

	} while (retry++ < SST_PP_LEVEL_CHANGE_RETRY_COUNT);

	/* If the level change didn't happen, return fault */
	if (perf_level.level != level)
		return -EFAULT;

	/* Reset the feature state on level change */
	_write_pp_info("perf_feature", 0, SST_PP_CONTROL_OFFSET,
		       SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH,
		       SST_MUL_FACTOR_NONE)

	/* Give time to FW to process */
	msleep(SST_PP_LEVEL_CHANGE_TIME_MS);

	return 0;
}
976 
/*
 * isst_if_set_perf_feature() - Write the SST-PP feature-state field
 *
 * Stores the user supplied feature value into the feature-state bits of
 * the SST-PP control register for the selected power domain.
 *
 * Return: 0 on success, -EFAULT on user copy failure or when dynamic SST
 * features are disabled, -EINVAL for a bad socket/power-domain id,
 * -EPERM when the firmware has blocked writes or the caller lacks
 * CAP_SYS_ADMIN.
 */
static int isst_if_set_perf_feature(void __user *argp)
{
	struct isst_perf_feature_control perf_feature;
	struct tpmi_per_power_domain_info *power_domain_info;

	/* Bail out when dynamic SST feature control is disabled */
	if (disable_dynamic_sst_features())
		return -EFAULT;

	if (copy_from_user(&perf_feature, argp, sizeof(perf_feature)))
		return -EFAULT;

	power_domain_info = get_instance(perf_feature.socket_id, perf_feature.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (power_domain_info->write_blocked || !capable(CAP_SYS_ADMIN))
		return -EPERM;

	_write_pp_info("perf_feature", perf_feature.feature, SST_PP_CONTROL_OFFSET,
		       SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH,
		       SST_MUL_FACTOR_NONE)

	return 0;
}
1001 
/*
 * Read one bit-field from the SST-PP register block of a performance level.
 * Same shape as _read_bf_level_info()/_read_tf_level_info() but based at
 * the perf-profile feature area (pp_offset, in qwords). "name" receives
 * bits [start, start + width - 1] of the 64-bit register at byte "offset",
 * multiplied by "mult_factor". "name_str" is unused by the expansion.
 * Relies on a local "power_domain_info" pointer at the expansion site.
 */
#define _read_pp_level_info(name_str, name, level, offset, start, width, mult_factor)\
{\
	u64 val, _mask;\
	\
	val = readq(power_domain_info->sst_base +\
		    power_domain_info->perf_levels[level].mmio_offset +\
		(power_domain_info->feature_offsets.pp_offset * 8) + (offset));\
	_mask = GENMASK_ULL((start + width - 1), start);\
	val &= _mask; \
	val >>= start;\
	name = (val * mult_factor);\
}
1014 
1015 #define SST_PP_INFO_0_OFFSET	0
1016 #define SST_PP_INFO_1_OFFSET	8
1017 #define SST_PP_INFO_2_OFFSET	16
1018 #define SST_PP_INFO_3_OFFSET	24
1019 
1020 /* SST_PP_INFO_4_OFFSET to SST_PP_INFO_9_OFFSET are trl levels */
1021 #define SST_PP_INFO_4_OFFSET	32
1022 
1023 #define SST_PP_INFO_10_OFFSET	80
1024 #define SST_PP_INFO_11_OFFSET	88
1025 #define SST_PP_INFO_12_OFFSET	96
1026 
1027 #define SST_PP_P1_SSE_START	0
1028 #define SST_PP_P1_SSE_WIDTH	8
1029 
1030 #define SST_PP_P1_AVX2_START	8
1031 #define SST_PP_P1_AVX2_WIDTH	8
1032 
1033 #define SST_PP_P1_AVX512_START	16
1034 #define SST_PP_P1_AVX512_WIDTH	8
1035 
1036 #define SST_PP_P1_AMX_START	24
1037 #define SST_PP_P1_AMX_WIDTH	8
1038 
1039 #define SST_PP_TDP_START	32
1040 #define SST_PP_TDP_WIDTH	15
1041 
1042 #define SST_PP_T_PROCHOT_START	47
1043 #define SST_PP_T_PROCHOT_WIDTH	8
1044 
1045 #define SST_PP_MAX_MEMORY_FREQ_START	55
1046 #define SST_PP_MAX_MEMORY_FREQ_WIDTH	7
1047 
1048 #define SST_PP_COOLING_TYPE_START	62
1049 #define SST_PP_COOLING_TYPE_WIDTH	2
1050 
1051 #define SST_PP_TRL_0_RATIO_0_START	0
1052 #define SST_PP_TRL_0_RATIO_0_WIDTH	8
1053 
1054 #define SST_PP_TRL_CORES_BUCKET_0_START	0
1055 #define SST_PP_TRL_CORES_BUCKET_0_WIDTH	8
1056 
1057 #define SST_PP_CORE_RATIO_P0_START	0
1058 #define SST_PP_CORE_RATIO_P0_WIDTH	8
1059 
1060 #define SST_PP_CORE_RATIO_P1_START	8
1061 #define SST_PP_CORE_RATIO_P1_WIDTH	8
1062 
1063 #define SST_PP_CORE_RATIO_PN_START	16
1064 #define SST_PP_CORE_RATIO_PN_WIDTH	8
1065 
1066 #define SST_PP_CORE_RATIO_PM_START	24
1067 #define SST_PP_CORE_RATIO_PM_WIDTH	8
1068 
1069 #define SST_PP_CORE_RATIO_P0_FABRIC_START	32
1070 #define SST_PP_CORE_RATIO_P0_FABRIC_WIDTH	8
1071 
1072 #define SST_PP_CORE_RATIO_P1_FABRIC_START	40
1073 #define SST_PP_CORE_RATIO_P1_FABRIC_WIDTH	8
1074 
1075 #define SST_PP_CORE_RATIO_PM_FABRIC_START	48
1076 #define SST_PP_CORE_RATIO_PM_FABRIC_WIDTH	8
1077 
1078 #define SST_PP_CORE_RATIO_P0_FABRIC_1_START	0
1079 #define SST_PP_CORE_RATIO_P0_FABRIC_1_WIDTH	8
1080 
1081 #define SST_PP_CORE_RATIO_P1_FABRIC_1_START	8
1082 #define SST_PP_CORE_RATIO_P1_FABRIC_1_WIDTH	8
1083 
1084 #define SST_PP_CORE_RATIO_PM_FABRIC_1_START	16
1085 #define SST_PP_CORE_RATIO_PM_FABRIC_1_WIDTH	8
1086 
/*
 * isst_if_get_perf_level_info() - Return detailed data for one SST-PP level
 *
 * Validates the user supplied level against the domain's maximum and the
 * enabled-level mask, then reads base frequencies (SSE/AVX2/AVX512/AMX),
 * TDP, Tjmax, memory frequency, cooling type, the TRL ratio/bucket tables
 * and the P0/P1/Pn/Pm core and fabric frequencies for that level, copying
 * the result back to user space.
 *
 * Return: 0 on success, -EFAULT on user copy failure, -EINVAL for a bad
 * socket/power-domain id or an invalid/disabled level.
 */
static int isst_if_get_perf_level_info(void __user *argp)
{
	struct isst_perf_level_data_info perf_level;
	struct tpmi_per_power_domain_info *power_domain_info;
	int i, j;

	if (copy_from_user(&perf_level, argp, sizeof(perf_level)))
		return -EFAULT;

	power_domain_info = get_instance(perf_level.socket_id, perf_level.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (perf_level.level > power_domain_info->max_level)
		return -EINVAL;

	/* The level must be enabled in hardware */
	if (!(power_domain_info->pp_header.level_en_mask & BIT(perf_level.level)))
		return -EINVAL;

	/*
	 * The TDP ratio is the same bit-field as the P1 SSE ratio, read
	 * without the ratio-to-MHz scaling.
	 */
	_read_pp_level_info("tdp_ratio", perf_level.tdp_ratio, perf_level.level,
			    SST_PP_INFO_0_OFFSET, SST_PP_P1_SSE_START, SST_PP_P1_SSE_WIDTH,
			    SST_MUL_FACTOR_NONE)
	_read_pp_level_info("base_freq_mhz", perf_level.base_freq_mhz, perf_level.level,
			    SST_PP_INFO_0_OFFSET, SST_PP_P1_SSE_START, SST_PP_P1_SSE_WIDTH,
			    SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("base_freq_avx2_mhz", perf_level.base_freq_avx2_mhz, perf_level.level,
			    SST_PP_INFO_0_OFFSET, SST_PP_P1_AVX2_START, SST_PP_P1_AVX2_WIDTH,
			    SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("base_freq_avx512_mhz", perf_level.base_freq_avx512_mhz,
			    perf_level.level, SST_PP_INFO_0_OFFSET, SST_PP_P1_AVX512_START,
			    SST_PP_P1_AVX512_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("base_freq_amx_mhz", perf_level.base_freq_amx_mhz, perf_level.level,
			    SST_PP_INFO_0_OFFSET, SST_PP_P1_AMX_START, SST_PP_P1_AMX_WIDTH,
			    SST_MUL_FACTOR_FREQ)

	_read_pp_level_info("thermal_design_power_w", perf_level.thermal_design_power_w,
			    perf_level.level, SST_PP_INFO_1_OFFSET, SST_PP_TDP_START,
			    SST_PP_TDP_WIDTH, SST_MUL_FACTOR_NONE)
	perf_level.thermal_design_power_w /= 8; /* units are in 1/8th watt */
	_read_pp_level_info("tjunction_max_c", perf_level.tjunction_max_c, perf_level.level,
			    SST_PP_INFO_1_OFFSET, SST_PP_T_PROCHOT_START, SST_PP_T_PROCHOT_WIDTH,
			    SST_MUL_FACTOR_NONE)
	_read_pp_level_info("max_memory_freq_mhz", perf_level.max_memory_freq_mhz,
			    perf_level.level, SST_PP_INFO_1_OFFSET, SST_PP_MAX_MEMORY_FREQ_START,
			    SST_PP_MAX_MEMORY_FREQ_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("cooling_type", perf_level.cooling_type, perf_level.level,
			    SST_PP_INFO_1_OFFSET, SST_PP_COOLING_TYPE_START,
			    SST_PP_COOLING_TYPE_WIDTH, SST_MUL_FACTOR_NONE)

	/*
	 * TRL ratios: one 8-byte register per TRL level starting at
	 * SST_PP_INFO_4, one 8-bit ratio per bucket within each register.
	 */
	for (i = 0; i < TRL_MAX_LEVELS; ++i) {
		for (j = 0; j < TRL_MAX_BUCKETS; ++j)
			_read_pp_level_info("trl*_bucket*_freq_mhz",
					    perf_level.trl_freq_mhz[i][j], perf_level.level,
					    SST_PP_INFO_4_OFFSET + (i * SST_PP_TRL_0_RATIO_0_WIDTH),
					    j * SST_PP_TRL_0_RATIO_0_WIDTH,
					    SST_PP_TRL_0_RATIO_0_WIDTH,
					    SST_MUL_FACTOR_FREQ);
	}

	/* Per-bucket core counts are packed as consecutive 8-bit fields */
	for (i = 0; i < TRL_MAX_BUCKETS; ++i)
		_read_pp_level_info("bucket*_core_count", perf_level.bucket_core_counts[i],
				    perf_level.level, SST_PP_INFO_10_OFFSET,
				    SST_PP_TRL_CORES_BUCKET_0_WIDTH * i,
				    SST_PP_TRL_CORES_BUCKET_0_WIDTH, SST_MUL_FACTOR_NONE)

	perf_level.max_buckets = TRL_MAX_BUCKETS;
	perf_level.max_trl_levels = TRL_MAX_LEVELS;

	_read_pp_level_info("p0_freq_mhz", perf_level.p0_freq_mhz, perf_level.level,
			    SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_P0_START,
			    SST_PP_CORE_RATIO_P0_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("p1_freq_mhz", perf_level.p1_freq_mhz, perf_level.level,
			    SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_P1_START,
			    SST_PP_CORE_RATIO_P1_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("pn_freq_mhz", perf_level.pn_freq_mhz, perf_level.level,
			    SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_PN_START,
			    SST_PP_CORE_RATIO_PN_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("pm_freq_mhz", perf_level.pm_freq_mhz, perf_level.level,
			    SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_PM_START,
			    SST_PP_CORE_RATIO_PM_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("p0_fabric_freq_mhz", perf_level.p0_fabric_freq_mhz,
			    perf_level.level, SST_PP_INFO_11_OFFSET,
			    SST_PP_CORE_RATIO_P0_FABRIC_START,
			    SST_PP_CORE_RATIO_P0_FABRIC_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("p1_fabric_freq_mhz", perf_level.p1_fabric_freq_mhz,
			    perf_level.level, SST_PP_INFO_11_OFFSET,
			    SST_PP_CORE_RATIO_P1_FABRIC_START,
			    SST_PP_CORE_RATIO_P1_FABRIC_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("pm_fabric_freq_mhz", perf_level.pm_fabric_freq_mhz,
			    perf_level.level, SST_PP_INFO_11_OFFSET,
			    SST_PP_CORE_RATIO_PM_FABRIC_START,
			    SST_PP_CORE_RATIO_PM_FABRIC_WIDTH, SST_MUL_FACTOR_FREQ)

	if (copy_to_user(argp, &perf_level, sizeof(perf_level)))
		return -EFAULT;

	return 0;
}
1185 
/*
 * isst_if_get_perf_level_fabric_info() - Return per-fabric frequencies
 *
 * Reads P0/P1/Pm fabric frequencies for up to two fabrics of a level.
 * The first fabric's fields live in SST_PP_INFO_11 (consecutive 8-bit
 * ratios starting at SST_PP_CORE_RATIO_P0_FABRIC_START); the second
 * fabric's fields continue in SST_PP_INFO_12. Only available from
 * feature revision 2 onwards.
 *
 * Return: 0 on success, -EFAULT on user copy failure, -EINVAL for a bad
 * socket/power-domain id, an invalid/disabled level, or feature_rev < 2.
 */
static int isst_if_get_perf_level_fabric_info(void __user *argp)
{
	struct isst_perf_level_fabric_info perf_level_fabric;
	struct tpmi_per_power_domain_info *power_domain_info;
	int start = SST_PP_CORE_RATIO_P0_FABRIC_START;
	int width = SST_PP_CORE_RATIO_P0_FABRIC_WIDTH;
	int offset = SST_PP_INFO_11_OFFSET;
	int i;

	if (copy_from_user(&perf_level_fabric, argp, sizeof(perf_level_fabric)))
		return -EFAULT;

	power_domain_info = get_instance(perf_level_fabric.socket_id,
					 perf_level_fabric.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (perf_level_fabric.level > power_domain_info->max_level)
		return -EINVAL;

	/* Fabric info is only defined for feature revision 2 and later */
	if (power_domain_info->pp_header.feature_rev < 2)
		return -EINVAL;

	if (!(power_domain_info->pp_header.level_en_mask & BIT(perf_level_fabric.level)))
		return -EINVAL;

	/* For revision 2, maximum number of fabrics is 2 */
	perf_level_fabric.max_fabrics = 2;

	for (i = 0; i < perf_level_fabric.max_fabrics; i++) {
		/* P0, P1, Pm are consecutive "width"-bit fields in the register */
		_read_pp_level_info("p0_fabric_freq_mhz", perf_level_fabric.p0_fabric_freq_mhz[i],
				    perf_level_fabric.level, offset, start, width,
				    SST_MUL_FACTOR_FREQ)
		start += width;

		_read_pp_level_info("p1_fabric_freq_mhz", perf_level_fabric.p1_fabric_freq_mhz[i],
				    perf_level_fabric.level, offset, start, width,
				    SST_MUL_FACTOR_FREQ)
		start += width;

		_read_pp_level_info("pm_fabric_freq_mhz", perf_level_fabric.pm_fabric_freq_mhz[i],
				    perf_level_fabric.level, offset, start, width,
				    SST_MUL_FACTOR_FREQ)
		/* Second fabric's fields are in the SST_PP_INFO_12 register */
		offset = SST_PP_INFO_12_OFFSET;
		start = SST_PP_CORE_RATIO_P0_FABRIC_1_START;
	}

	if (copy_to_user(argp, &perf_level_fabric, sizeof(perf_level_fabric)))
		return -EFAULT;

	return 0;
}
1238 
1239 #define SST_PP_FUSED_CORE_COUNT_START	0
1240 #define SST_PP_FUSED_CORE_COUNT_WIDTH	8
1241 
1242 #define SST_PP_RSLVD_CORE_COUNT_START	8
1243 #define SST_PP_RSLVD_CORE_COUNT_WIDTH	8
1244 
1245 #define SST_PP_RSLVD_CORE_MASK_START	0
1246 #define SST_PP_RSLVD_CORE_MASK_WIDTH	64
1247 
/*
 * isst_if_get_perf_level_mask() - Return the resolved core mask of a level
 *
 * Reads the 64-bit resolved core mask for the requested performance level
 * and copies it back to user space. Requests without punit_cpu_map set are
 * rejected after the read (punit to Linux CPU numbering conversion is not
 * supported here).
 *
 * Return: 0 on success, -EFAULT on user copy failure, -EINVAL for a bad
 * socket/power-domain id or an out-of-range level, -EOPNOTSUPP when
 * punit_cpu_map is not set.
 */
static int isst_if_get_perf_level_mask(void __user *argp)
{
	/* static (not stack): calls are serialized by isst_tpmi_dev_lock */
	static struct isst_perf_level_cpu_mask cpumask;
	struct tpmi_per_power_domain_info *power_domain_info;
	u64 mask;

	if (copy_from_user(&cpumask, argp, sizeof(cpumask)))
		return -EFAULT;

	power_domain_info = get_instance(cpumask.socket_id, cpumask.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	/*
	 * Bound the user supplied level before it indexes
	 * power_domain_info->perf_levels[] inside _read_pp_level_info(),
	 * matching the check done in isst_if_get_perf_level_info().
	 */
	if (cpumask.level > power_domain_info->max_level)
		return -EINVAL;

	_read_pp_level_info("mask", mask, cpumask.level, SST_PP_INFO_2_OFFSET,
			    SST_PP_RSLVD_CORE_MASK_START, SST_PP_RSLVD_CORE_MASK_WIDTH,
			    SST_MUL_FACTOR_NONE)

	cpumask.mask = mask;

	if (!cpumask.punit_cpu_map)
		return -EOPNOTSUPP;

	if (copy_to_user(argp, &cpumask, sizeof(cpumask)))
		return -EFAULT;

	return 0;
}
1275 
1276 #define SST_BF_INFO_0_OFFSET	0
1277 #define SST_BF_INFO_1_OFFSET	8
1278 
1279 #define SST_BF_P1_HIGH_START	13
1280 #define SST_BF_P1_HIGH_WIDTH	8
1281 
1282 #define SST_BF_P1_LOW_START	21
1283 #define SST_BF_P1_LOW_WIDTH	8
1284 
1285 #define SST_BF_T_PROHOT_START	38
1286 #define SST_BF_T_PROHOT_WIDTH	8
1287 
1288 #define SST_BF_TDP_START	46
1289 #define SST_BF_TDP_WIDTH	15
1290 
/*
 * isst_if_get_base_freq_info() - Return SST-BF data for one level
 *
 * Reads the high/low base frequencies, Tjmax and TDP from the SST-BF
 * register block of the requested performance level and copies the
 * result back to user space.
 *
 * Return: 0 on success, -EFAULT on user copy failure, -EINVAL for a bad
 * socket/power-domain id or an out-of-range level.
 */
static int isst_if_get_base_freq_info(void __user *argp)
{
	/* static (not stack): calls are serialized by isst_tpmi_dev_lock */
	static struct isst_base_freq_info base_freq;
	struct tpmi_per_power_domain_info *power_domain_info;

	if (copy_from_user(&base_freq, argp, sizeof(base_freq)))
		return -EFAULT;

	power_domain_info = get_instance(base_freq.socket_id, base_freq.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (base_freq.level > power_domain_info->max_level)
		return -EINVAL;

	_read_bf_level_info("p1_high", base_freq.high_base_freq_mhz, base_freq.level,
			    SST_BF_INFO_0_OFFSET, SST_BF_P1_HIGH_START, SST_BF_P1_HIGH_WIDTH,
			    SST_MUL_FACTOR_FREQ)
	_read_bf_level_info("p1_low", base_freq.low_base_freq_mhz, base_freq.level,
			    SST_BF_INFO_0_OFFSET, SST_BF_P1_LOW_START, SST_BF_P1_LOW_WIDTH,
			    SST_MUL_FACTOR_FREQ)
	_read_bf_level_info("BF-TJ", base_freq.tjunction_max_c, base_freq.level,
			    SST_BF_INFO_0_OFFSET, SST_BF_T_PROHOT_START, SST_BF_T_PROHOT_WIDTH,
			    SST_MUL_FACTOR_NONE)
	_read_bf_level_info("BF-tdp", base_freq.thermal_design_power_w, base_freq.level,
			    SST_BF_INFO_0_OFFSET, SST_BF_TDP_START, SST_BF_TDP_WIDTH,
			    SST_MUL_FACTOR_NONE)
	base_freq.thermal_design_power_w /= 8; /*unit = 1/8th watt*/

	if (copy_to_user(argp, &base_freq, sizeof(base_freq)))
		return -EFAULT;

	return 0;
}
1325 
1326 #define P1_HI_CORE_MASK_START	0
1327 #define P1_HI_CORE_MASK_WIDTH	64
1328 
/*
 * isst_if_get_base_freq_mask() - Return the SST-BF high-priority core mask
 *
 * Reads the 64-bit P1-high core mask for the requested level from the
 * SST-BF register block and copies it back to user space. Requests
 * without punit_cpu_map set are rejected after the read.
 *
 * Return: 0 on success, -EFAULT on user copy failure, -EINVAL for a bad
 * socket/power-domain id or an out-of-range level, -EOPNOTSUPP when
 * punit_cpu_map is not set.
 */
static int isst_if_get_base_freq_mask(void __user *argp)
{
	/* static (not stack): calls are serialized by isst_tpmi_dev_lock */
	static struct isst_perf_level_cpu_mask cpumask;
	struct tpmi_per_power_domain_info *power_domain_info;
	u64 mask;

	if (copy_from_user(&cpumask, argp, sizeof(cpumask)))
		return -EFAULT;

	power_domain_info = get_instance(cpumask.socket_id, cpumask.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	/*
	 * Bound the user supplied level before it indexes
	 * power_domain_info->perf_levels[] inside _read_bf_level_info(),
	 * matching the check done in isst_if_get_base_freq_info().
	 */
	if (cpumask.level > power_domain_info->max_level)
		return -EINVAL;

	_read_bf_level_info("BF-cpumask", mask, cpumask.level, SST_BF_INFO_1_OFFSET,
			    P1_HI_CORE_MASK_START, P1_HI_CORE_MASK_WIDTH,
			    SST_MUL_FACTOR_NONE)

	cpumask.mask = mask;

	if (!cpumask.punit_cpu_map)
		return -EOPNOTSUPP;

	if (copy_to_user(argp, &cpumask, sizeof(cpumask)))
		return -EFAULT;

	return 0;
}
1356 
/*
 * isst_if_get_tpmi_instance_count() - Return power domain count and validity
 *
 * For the requested socket, reports the number of power domain instances
 * and a bitmask of which instances have a valid (mapped) SST MMIO base.
 * If no instance is valid the count is forced to 0.
 *
 * Return: 0 on success, -EFAULT on user copy failure, -EINVAL for an
 * out-of-range socket id.
 */
static int isst_if_get_tpmi_instance_count(void __user *argp)
{
	struct isst_tpmi_instance_count tpmi_inst;
	struct tpmi_sst_struct *sst_inst;
	int i;

	if (copy_from_user(&tpmi_inst, argp, sizeof(tpmi_inst)))
		return -EFAULT;

	if (tpmi_inst.socket_id >= topology_max_packages())
		return -EINVAL;

	sst_inst = isst_common.sst_inst[tpmi_inst.socket_id];

	tpmi_inst.count = isst_instance_count(sst_inst);

	tpmi_inst.valid_mask = 0;
	for (i = 0; i < tpmi_inst.count; i++) {
		struct tpmi_per_power_domain_info *pd_info;
		u8 part;
		int pd;

		/* Translate the flat index to a (partition, power domain) pair */
		pd = map_partition_power_domain_id(sst_inst, i, &part);
		if (pd < 0)
			continue;

		/* An instance is valid only if its MMIO region was mapped */
		pd_info = &sst_inst->power_domain_info[part][pd];
		if (pd_info->sst_base)
			tpmi_inst.valid_mask |= BIT(i);
	}

	if (!tpmi_inst.valid_mask)
		tpmi_inst.count = 0;

	if (copy_to_user(argp, &tpmi_inst, sizeof(tpmi_inst)))
		return -EFAULT;

	return 0;
}
1396 
1397 #define SST_TF_INFO_0_OFFSET	0
1398 #define SST_TF_INFO_1_OFFSET	8
1399 #define SST_TF_INFO_2_OFFSET	16
1400 #define SST_TF_INFO_8_OFFSET	64
1401 #define SST_TF_INFO_8_BUCKETS	3
1402 
1403 #define SST_TF_MAX_LP_CLIP_RATIOS	TRL_MAX_LEVELS
1404 
1405 #define SST_TF_FEATURE_REV_START	4
1406 #define SST_TF_FEATURE_REV_WIDTH	8
1407 
1408 #define SST_TF_LP_CLIP_RATIO_0_START	16
1409 #define SST_TF_LP_CLIP_RATIO_0_WIDTH	8
1410 
1411 #define SST_TF_RATIO_0_START	0
1412 #define SST_TF_RATIO_0_WIDTH	8
1413 
1414 #define SST_TF_NUM_CORE_0_START 0
1415 #define SST_TF_NUM_CORE_0_WIDTH 8
1416 
1417 #define SST_TF_NUM_MOD_0_START	0
1418 #define SST_TF_NUM_MOD_0_WIDTH	16
1419 
/*
 * isst_if_get_turbo_freq_info() - Return SST-TF data for one level
 *
 * Reads low-priority clip frequencies, the TRL frequency table, and the
 * per-bucket core counts from the SST-TF register block of the requested
 * level. For feature revision >= 2 the bucket counts are read as 16-bit
 * module counts from the SST_TF_INFO_8 register; if those are all zero
 * (or the revision is older), fall back to the legacy 8-bit core counts
 * in SST_TF_INFO_1.
 *
 * Return: 0 on success, -EFAULT on user copy failure, -EINVAL for a bad
 * socket/power-domain id or an out-of-range level.
 */
static int isst_if_get_turbo_freq_info(void __user *argp)
{
	/* static (not stack): calls are serialized by isst_tpmi_dev_lock */
	static struct isst_turbo_freq_info turbo_freq;
	struct tpmi_per_power_domain_info *power_domain_info;
	u8 feature_rev;
	int i, j;

	if (copy_from_user(&turbo_freq, argp, sizeof(turbo_freq)))
		return -EFAULT;

	power_domain_info = get_instance(turbo_freq.socket_id, turbo_freq.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (turbo_freq.level > power_domain_info->max_level)
		return -EINVAL;

	turbo_freq.max_buckets = TRL_MAX_BUCKETS;
	turbo_freq.max_trl_levels = TRL_MAX_LEVELS;
	turbo_freq.max_clip_freqs = SST_TF_MAX_LP_CLIP_RATIOS;

	/* The feature revision selects how bucket core counts are read below */
	_read_tf_level_info("feature_rev", feature_rev, turbo_freq.level,
			    SST_TF_INFO_0_OFFSET, SST_TF_FEATURE_REV_START,
			    SST_TF_FEATURE_REV_WIDTH, SST_MUL_FACTOR_NONE);

	for (i = 0; i < turbo_freq.max_clip_freqs; ++i)
		_read_tf_level_info("lp_clip*", turbo_freq.lp_clip_freq_mhz[i],
				    turbo_freq.level, SST_TF_INFO_0_OFFSET,
				    SST_TF_LP_CLIP_RATIO_0_START +
				    (i * SST_TF_LP_CLIP_RATIO_0_WIDTH),
				    SST_TF_LP_CLIP_RATIO_0_WIDTH, SST_MUL_FACTOR_FREQ)

	/* One 8-byte register per TRL level, one 8-bit ratio per bucket */
	for (i = 0; i < TRL_MAX_LEVELS; ++i) {
		for (j = 0; j < TRL_MAX_BUCKETS; ++j)
			_read_tf_level_info("cydn*_bucket_*_trl",
					    turbo_freq.trl_freq_mhz[i][j], turbo_freq.level,
					    SST_TF_INFO_2_OFFSET + (i * SST_TF_RATIO_0_WIDTH),
					    j * SST_TF_RATIO_0_WIDTH, SST_TF_RATIO_0_WIDTH,
					    SST_MUL_FACTOR_FREQ)
	}

	if (feature_rev >= 2) {
		bool has_tf_info_8 = false;

		/* Rev >= 2: 16-bit module counts from SST_TF_INFO_8 */
		for (i = 0; i < SST_TF_INFO_8_BUCKETS; ++i) {
			_read_tf_level_info("bucket_*_mod_count", turbo_freq.bucket_core_counts[i],
					    turbo_freq.level, SST_TF_INFO_8_OFFSET,
					    SST_TF_NUM_MOD_0_WIDTH * i, SST_TF_NUM_MOD_0_WIDTH,
					    SST_MUL_FACTOR_NONE)

			if (turbo_freq.bucket_core_counts[i])
				has_tf_info_8 = true;
		}

		/* Use the module counts only if at least one is non-zero */
		if (has_tf_info_8)
			goto done_core_count;
	}

	/* Legacy path: 8-bit per-bucket core counts from SST_TF_INFO_1 */
	for (i = 0; i < TRL_MAX_BUCKETS; ++i)
		_read_tf_level_info("bucket_*_core_count", turbo_freq.bucket_core_counts[i],
				    turbo_freq.level, SST_TF_INFO_1_OFFSET,
				    SST_TF_NUM_CORE_0_WIDTH * i, SST_TF_NUM_CORE_0_WIDTH,
				    SST_MUL_FACTOR_NONE)


done_core_count:

	if (copy_to_user(argp, &turbo_freq, sizeof(turbo_freq)))
		return -EFAULT;

	return 0;
}
1492 
isst_if_def_ioctl(struct file * file,unsigned int cmd,unsigned long arg)1493 static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
1494 			      unsigned long arg)
1495 {
1496 	void __user *argp = (void __user *)arg;
1497 	long ret = -ENOTTY;
1498 
1499 	mutex_lock(&isst_tpmi_dev_lock);
1500 	switch (cmd) {
1501 	case ISST_IF_COUNT_TPMI_INSTANCES:
1502 		ret = isst_if_get_tpmi_instance_count(argp);
1503 		break;
1504 	case ISST_IF_CORE_POWER_STATE:
1505 		ret = isst_if_core_power_state(argp);
1506 		break;
1507 	case ISST_IF_CLOS_PARAM:
1508 		ret = isst_if_clos_param(argp);
1509 		break;
1510 	case ISST_IF_CLOS_ASSOC:
1511 		ret = isst_if_clos_assoc(argp);
1512 		break;
1513 	case ISST_IF_PERF_LEVELS:
1514 		ret = isst_if_get_perf_level(argp);
1515 		break;
1516 	case ISST_IF_PERF_SET_LEVEL:
1517 		ret = isst_if_set_perf_level(argp);
1518 		break;
1519 	case ISST_IF_PERF_SET_FEATURE:
1520 		ret = isst_if_set_perf_feature(argp);
1521 		break;
1522 	case ISST_IF_GET_PERF_LEVEL_INFO:
1523 		ret = isst_if_get_perf_level_info(argp);
1524 		break;
1525 	case ISST_IF_GET_PERF_LEVEL_FABRIC_INFO:
1526 		ret = isst_if_get_perf_level_fabric_info(argp);
1527 		break;
1528 	case ISST_IF_GET_PERF_LEVEL_CPU_MASK:
1529 		ret = isst_if_get_perf_level_mask(argp);
1530 		break;
1531 	case ISST_IF_GET_BASE_FREQ_INFO:
1532 		ret = isst_if_get_base_freq_info(argp);
1533 		break;
1534 	case ISST_IF_GET_BASE_FREQ_CPU_MASK:
1535 		ret = isst_if_get_base_freq_mask(argp);
1536 		break;
1537 	case ISST_IF_GET_TURBO_FREQ_INFO:
1538 		ret = isst_if_get_turbo_freq_info(argp);
1539 		break;
1540 	default:
1541 		break;
1542 	}
1543 	mutex_unlock(&isst_tpmi_dev_lock);
1544 
1545 	return ret;
1546 }
1547 
1548 #define TPMI_SST_AUTO_SUSPEND_DELAY_MS	2000
1549 
/*
 * tpmi_sst_dev_add() - Enumerate SST for one TPMI auxiliary device
 *
 * Called once per partition device of a package. Allocates (on first
 * enumeration) or reuses the package-wide tpmi_sst_struct, then maps and
 * probes every MMIO resource of this partition as a power domain. Domains
 * whose registers fail sst_main() validation are unmapped and skipped; at
 * least one valid domain is required. On success the instance is recorded
 * in isst_common.sst_inst[] and attached as driver data.
 *
 * Return: 0 on success, negative errno on failure (-ENODEV when firmware
 * blocks reads or no valid power domain is found, -EINVAL for bad
 * platform data, -ENOMEM on allocation failure).
 */
int tpmi_sst_dev_add(struct auxiliary_device *auxdev)
{
	struct tpmi_per_power_domain_info *pd_info;
	bool read_blocked = 0, write_blocked = 0;
	struct oobmsm_plat_info *plat_info;
	struct device *dev = &auxdev->dev;
	struct tpmi_sst_struct *tpmi_sst;
	u8 i, num_resources, io_die_cnt;
	int ret, pkg = 0, inst = 0;
	bool first_enum = false;
	u16 cdie_mask;
	u8 partition;

	ret = tpmi_get_feature_status(auxdev, TPMI_ID_SST, &read_blocked, &write_blocked);
	if (ret)
		dev_info(dev, "Can't read feature status: ignoring read/write blocked status\n");

	if (read_blocked) {
		dev_info(dev, "Firmware has blocked reads, exiting\n");
		return -ENODEV;
	}

	plat_info = tpmi_get_platform_data(auxdev);
	if (!plat_info) {
		dev_err(dev, "No platform info\n");
		return -EINVAL;
	}

	pkg = plat_info->package_id;
	if (pkg >= topology_max_packages()) {
		dev_err(dev, "Invalid package id :%x\n", pkg);
		return -EINVAL;
	}

	partition = plat_info->partition;
	if (partition >= SST_MAX_PARTITIONS) {
		dev_err(&auxdev->dev, "Invalid partition :%x\n", partition);
		return -EINVAL;
	}

	num_resources = tpmi_get_resource_count(auxdev);

	if (!num_resources)
		return -EINVAL;

	mutex_lock(&isst_tpmi_dev_lock);

	if (isst_common.sst_inst[pkg]) {
		tpmi_sst = isst_common.sst_inst[pkg];
	} else {
		/*
		 * tpmi_sst instance is for a package. So needs to be
		 * allocated only once for both partitions. We can't use
		 * devm_* allocation here as each partition is a
		 * different device, which can be unbound.
		 */
		tpmi_sst = kzalloc_obj(*tpmi_sst);
		if (!tpmi_sst) {
			ret = -ENOMEM;
			goto unlock_exit;
		}
		first_enum = true;
	}

	ret = 0;

	/* Per-partition power domain array; devm-tied to this partition's device */
	pd_info = devm_kcalloc(dev, num_resources, sizeof(*pd_info), GFP_KERNEL);
	if (!pd_info) {
		ret = -ENOMEM;
		goto unlock_free;
	}

	/* Get the IO die count, if cdie_mask is present */
	if (plat_info->cdie_mask) {
		u8 cdie_range;

		cdie_mask = plat_info->cdie_mask;
		cdie_range = fls(cdie_mask) - ffs(cdie_mask) + 1;
		io_die_cnt = num_resources - cdie_range;
	} else {
		/*
		 * This is a synthetic mask, careful when assuming that
		 * they are compute dies only.
		 */
		cdie_mask = (1 << num_resources) - 1;
		io_die_cnt = 0;
	}

	for (i = 0; i < num_resources; ++i) {
		struct resource *res;

		res = tpmi_get_resource_at_index(auxdev, i);
		if (!res) {
			pd_info[i].sst_base = NULL;
			continue;
		}

		pd_info[i].package_id = pkg;
		pd_info[i].power_domain_id = i;
		pd_info[i].auxdev = auxdev;
		pd_info[i].write_blocked = write_blocked;
		pd_info[i].sst_base = devm_ioremap_resource(dev, res);
		if (IS_ERR(pd_info[i].sst_base)) {
			ret = PTR_ERR(pd_info[i].sst_base);
			goto unlock_free;
		}

		if (sst_main(auxdev, &pd_info[i])) {
			/*
			 * This entry is not valid, hardware can partially
			 * populate dies. In this case MMIO will have 0xFFs.
			 * Also possible some pre-production hardware has
			 * invalid data. But don't fail and continue to use
			 * other dies with valid data.
			 */
			devm_iounmap(dev, pd_info[i].sst_base);
			pd_info[i].sst_base = NULL;
			continue;
		}

		++inst;
	}

	/* Require at least one usable power domain in this partition */
	if (!inst) {
		ret = -ENODEV;
		goto unlock_free;
	}

	tpmi_sst->package_id = pkg;

	tpmi_sst->power_domain_info[partition] = pd_info;
	tpmi_sst->number_of_power_domains[partition] = num_resources;
	tpmi_sst->cdie_mask[partition] = cdie_mask;
	tpmi_sst->io_dies[partition] = io_die_cnt;
	tpmi_sst->partition_mask |= BIT(partition);
	tpmi_sst->partition_mask_current |= BIT(partition);

	auxiliary_set_drvdata(auxdev, tpmi_sst);

	if (isst_common.max_index < pkg)
		isst_common.max_index = pkg;
	isst_common.sst_inst[pkg] = tpmi_sst;

unlock_free:
	/* Only free the package instance if we allocated it in this call */
	if (ret && first_enum)
		kfree(tpmi_sst);
unlock_exit:
	mutex_unlock(&isst_tpmi_dev_lock);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_add, "INTEL_TPMI_SST");
1702 
tpmi_sst_dev_remove(struct auxiliary_device * auxdev)1703 void tpmi_sst_dev_remove(struct auxiliary_device *auxdev)
1704 {
1705 	struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
1706 	struct oobmsm_plat_info *plat_info;
1707 
1708 	plat_info = tpmi_get_platform_data(auxdev);
1709 	if (!plat_info)
1710 		return;
1711 
1712 	mutex_lock(&isst_tpmi_dev_lock);
1713 	tpmi_sst->power_domain_info[plat_info->partition] = NULL;
1714 	tpmi_sst->partition_mask_current &= ~BIT(plat_info->partition);
1715 	/* Free the package instance when the all partitions are removed */
1716 	if (!tpmi_sst->partition_mask_current) {
1717 		isst_common.sst_inst[tpmi_sst->package_id] = NULL;
1718 		kfree(tpmi_sst);
1719 	}
1720 	mutex_unlock(&isst_tpmi_dev_lock);
1721 }
1722 EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_remove, "INTEL_TPMI_SST");
1723 
1724 #define SST_PP_CAP_CP_ENABLE	BIT(0)
1725 #define SST_PP_CAP_PP_ENABLE	BIT(1)
1726 
/*
 * tpmi_sst_dev_suspend() - Save SST register state before suspend
 *
 * For every valid power domain of this partition, saves the SST-CP
 * control register, CLOS configuration and CLOS association registers
 * (when the domain advertises CP capability), and the SST-PP control
 * register (when it advertises PP capability), so they can be restored
 * by tpmi_sst_dev_resume().
 */
void tpmi_sst_dev_suspend(struct auxiliary_device *auxdev)
{
	struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
	struct tpmi_per_power_domain_info *power_domain_info, *pd_info;
	struct oobmsm_plat_info *plat_info;
	void __iomem *cp_base;
	int num_resources, i;

	plat_info = tpmi_get_platform_data(auxdev);
	if (!plat_info)
		return;

	power_domain_info = tpmi_sst->power_domain_info[plat_info->partition];
	num_resources = tpmi_sst->number_of_power_domains[plat_info->partition];

	for (i = 0; i < num_resources; i++) {
		pd_info = &power_domain_info[i];
		/* Skip domains that were never mapped or failed validation */
		if (!pd_info || !pd_info->sst_base)
			continue;

		if (!(pd_info->sst_header.cap_mask & SST_PP_CAP_CP_ENABLE))
			goto process_pp_suspend;

		/* Save SST-CP control plus CLOS config/association blocks */
		cp_base = pd_info->sst_base + pd_info->sst_header.cp_offset;
		pd_info->saved_sst_cp_control = readq(cp_base + SST_CP_CONTROL_OFFSET);
		memcpy_fromio(pd_info->saved_clos_configs, cp_base + SST_CLOS_CONFIG_0_OFFSET,
			      sizeof(pd_info->saved_clos_configs));
		memcpy_fromio(pd_info->saved_clos_assocs, cp_base + SST_CLOS_ASSOC_0_OFFSET,
			      sizeof(pd_info->saved_clos_assocs));

process_pp_suspend:
		if (!(pd_info->sst_header.cap_mask & SST_PP_CAP_PP_ENABLE))
			continue;

		/* Save the SST-PP control register */
		pd_info->saved_pp_control = readq(pd_info->sst_base +
						  pd_info->sst_header.pp_offset +
						  SST_PP_CONTROL_OFFSET);
	}
}
EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_suspend, "INTEL_TPMI_SST");
1767 
tpmi_sst_dev_resume(struct auxiliary_device * auxdev)1768 void tpmi_sst_dev_resume(struct auxiliary_device *auxdev)
1769 {
1770 	struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
1771 	struct tpmi_per_power_domain_info *power_domain_info, *pd_info;
1772 	struct oobmsm_plat_info *plat_info;
1773 	void __iomem *cp_base;
1774 	int num_resources, i;
1775 
1776 	plat_info = tpmi_get_platform_data(auxdev);
1777 	if (!plat_info)
1778 		return;
1779 
1780 	power_domain_info = tpmi_sst->power_domain_info[plat_info->partition];
1781 	num_resources = tpmi_sst->number_of_power_domains[plat_info->partition];
1782 
1783 	for (i = 0; i < num_resources; i++) {
1784 		pd_info = &power_domain_info[i];
1785 		if (!pd_info || !pd_info->sst_base)
1786 			continue;
1787 
1788 		if (!(pd_info->sst_header.cap_mask & SST_PP_CAP_CP_ENABLE))
1789 			goto process_pp_resume;
1790 
1791 		cp_base = pd_info->sst_base + pd_info->sst_header.cp_offset;
1792 		writeq(pd_info->saved_sst_cp_control, cp_base + SST_CP_CONTROL_OFFSET);
1793 		memcpy_toio(cp_base + SST_CLOS_CONFIG_0_OFFSET, pd_info->saved_clos_configs,
1794 			    sizeof(pd_info->saved_clos_configs));
1795 		memcpy_toio(cp_base + SST_CLOS_ASSOC_0_OFFSET, pd_info->saved_clos_assocs,
1796 			    sizeof(pd_info->saved_clos_assocs));
1797 
1798 process_pp_resume:
1799 		if (!(pd_info->sst_header.cap_mask & SST_PP_CAP_PP_ENABLE))
1800 			continue;
1801 
1802 		writeq(pd_info->saved_pp_control, power_domain_info->sst_base +
1803 		       pd_info->sst_header.pp_offset + SST_PP_CONTROL_OFFSET);
1804 	}
1805 }
1806 EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_resume, "INTEL_TPMI_SST");
1807 
1808 #define ISST_TPMI_API_VERSION	0x03
1809 
tpmi_sst_init(void)1810 int tpmi_sst_init(void)
1811 {
1812 	struct isst_if_cmd_cb cb;
1813 	int ret = 0;
1814 
1815 	mutex_lock(&isst_tpmi_dev_lock);
1816 
1817 	if (isst_core_usage_count) {
1818 		++isst_core_usage_count;
1819 		goto init_done;
1820 	}
1821 
1822 	isst_common.sst_inst = kzalloc_objs(*isst_common.sst_inst,
1823 					    topology_max_packages());
1824 	if (!isst_common.sst_inst) {
1825 		ret = -ENOMEM;
1826 		goto init_done;
1827 	}
1828 
1829 	memset(&cb, 0, sizeof(cb));
1830 	cb.cmd_size = sizeof(struct isst_if_io_reg);
1831 	cb.offset = offsetof(struct isst_if_io_regs, io_reg);
1832 	cb.cmd_callback = NULL;
1833 	cb.api_version = ISST_TPMI_API_VERSION;
1834 	cb.def_ioctl = isst_if_def_ioctl;
1835 	cb.owner = THIS_MODULE;
1836 	ret = isst_if_cdev_register(ISST_IF_DEV_TPMI, &cb);
1837 	if (ret)
1838 		kfree(isst_common.sst_inst);
1839 	else
1840 		++isst_core_usage_count;
1841 init_done:
1842 	mutex_unlock(&isst_tpmi_dev_lock);
1843 	return ret;
1844 }
1845 EXPORT_SYMBOL_NS_GPL(tpmi_sst_init, "INTEL_TPMI_SST");
1846 
tpmi_sst_exit(void)1847 void tpmi_sst_exit(void)
1848 {
1849 	mutex_lock(&isst_tpmi_dev_lock);
1850 	if (isst_core_usage_count)
1851 		--isst_core_usage_count;
1852 
1853 	if (!isst_core_usage_count) {
1854 		isst_if_cdev_unregister(ISST_IF_DEV_TPMI);
1855 		kfree(isst_common.sst_inst);
1856 	}
1857 	mutex_unlock(&isst_tpmi_dev_lock);
1858 }
1859 EXPORT_SYMBOL_NS_GPL(tpmi_sst_exit, "INTEL_TPMI_SST");
1860 
1861 MODULE_IMPORT_NS("INTEL_TPMI");
1862 MODULE_IMPORT_NS("INTEL_TPMI_POWER_DOMAIN");
1863 
1864 MODULE_DESCRIPTION("ISST TPMI interface module");
1865 MODULE_LICENSE("GPL");
1866