// SPDX-License-Identifier: GPL-2.0-only
/*
 * isst_tpmi.c: SST TPMI interface core
 *
 * Copyright (c) 2023, Intel Corporation.
 * All Rights Reserved.
 *
 * This information will be useful to understand flows:
 * In the current generation of platforms, TPMI is supported via an OOB
 * PCI device. This PCI device has one instance per CPU package.
 * There is a unique TPMI ID for SST. Each TPMI ID also has multiple
 * entries, representing per power domain information.
 *
 * There is one dev file for complete SST information and control, same
 * as in the prior generation of hardware. User space doesn't need to know
 * how the information is presented by the hardware. The TPMI core module
 * implements the hardware mapping.
 */
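
/*
 * A minimal user-space sketch of using this interface (assuming the
 * character device created by the ISST common core is /dev/isst_interface;
 * headers and error handling trimmed):
 *
 *	struct isst_tpmi_instance_count count = { .socket_id = 0 };
 *	int fd = open("/dev/isst_interface", O_RDWR);
 *
 *	if (fd >= 0 && !ioctl(fd, ISST_IF_COUNT_TPMI_INSTANCES, &count))
 *		printf("power domains: %d\n", count.count);
 */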

#define dev_fmt(fmt) "tpmi_sst: " fmt

#include <linux/auxiliary_bus.h>
#include <linux/delay.h>
#include <linux/intel_tpmi.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <uapi/linux/isst_if.h>

#include "isst_tpmi_core.h"
#include "isst_if_common.h"

/* Supported SST hardware version by this driver */
#define ISST_MAJOR_VERSION	0
#define ISST_MINOR_VERSION	1

/*
 * Used to indicate whether a value read from MMIO needs to be multiplied
 * to get to a standard unit or not.
 */
#define SST_MUL_FACTOR_NONE    1

/* 100 is the scaling factor for frequency-ratio-to-frequency conversion */
#define SST_MUL_FACTOR_FREQ    100
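
/*
 * For example, a ratio field value of 18 read from MMIO corresponds to
 * 18 * SST_MUL_FACTOR_FREQ = 1800 MHz.
 */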

/* All SST regs are 64 bit size */
#define SST_REG_SIZE   8

/**
 * struct sst_header -	SST main header
 * @interface_version:	Version number for this interface
 * @cap_mask:		Bitmask of the supported sub features. 1=the sub feature is enabled.
 *			0=disabled.
 *			Bit[8]= SST_CP enable (1), disable (0)
 *			Bit[9]= SST_PP enable (1), disable (0)
 *			other bits are reserved for future use
 * @cp_offset:		Qword (8 bytes) offset to the SST_CP register bank
 * @pp_offset:		Qword (8 bytes) offset to the SST_PP register bank
 * @reserved:		Reserved for future use
 *
 * This register allows SW to discover SST capability and the offsets to SST-CP
 * and SST-PP register banks.
 */
struct sst_header {
	u8 interface_version;
	u8 cap_mask;
	u8 cp_offset;
	u8 pp_offset;
	u32 reserved;
} __packed;
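
/*
 * Note: hardware reports cp_offset and pp_offset in qword units; sst_main()
 * converts them to byte offsets (multiplies by 8) before the register banks
 * are addressed through them.
 */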

/**
 * struct cp_header -	SST-CP (core-power) header
 * @feature_id:		0=SST-CP, 1=SST-PP, 2=SST-BF, 3=SST-TF
 * @feature_rev:	Interface Version number for this SST feature
 * @ratio_unit:		Frequency ratio unit. 00: 100MHz. All others are reserved
 * @reserved:		Reserved for future use
 *
 * This structure is used to store the SST-CP header. This is packed to the same
 * format as defined in the specifications.
 */
struct cp_header {
	u64 feature_id :4;
	u64 feature_rev :8;
	u64 ratio_unit :2;
	u64 reserved :50;
} __packed;

/**
 * struct pp_header -	SST-PP (Perf profile) header
 * @feature_id:		0=SST-CP, 1=SST-PP, 2=SST-BF, 3=SST-TF
 * @feature_rev:	Interface Version number for this SST feature
 * @level_en_mask:	SST-PP level enable/disable fuse mask
 * @allowed_level_mask:	Allowed level mask used for dynamic config level switching
 * @reserved0:		Reserved for future use
 * @ratio_unit:		Frequency ratio unit. 00: 100MHz. All others are reserved
 * @block_size:		Size of PP block in Qword unit (8 bytes)
 * @dynamic_switch:	If set (1), dynamic switching of SST PP is supported
 * @memory_ratio_unit:	Memory Controller frequency ratio unit. 00: 100MHz, others reserved
 * @reserved1:		Reserved for future use
 *
 * This structure is used to store the SST-PP header. This is packed to the same
 * format as defined in the specifications.
 */
struct pp_header {
	u64 feature_id :4;
	u64 feature_rev :8;
	u64 level_en_mask :8;
	u64 allowed_level_mask :8;
	u64 reserved0 :4;
	u64 ratio_unit :2;
	u64 block_size :8;
	u64 dynamic_switch :1;
	u64 memory_ratio_unit :2;
	u64 reserved1 :19;
} __packed;

/**
 * struct feature_offset -	Offsets to SST-PP features
 * @pp_offset:		Qword offset within PP level for the SST_PP register bank
 * @bf_offset:		Qword offset within PP level for the SST_BF register bank
 * @tf_offset:		Qword offset within PP level for the SST_TF register bank
 * @reserved:		Reserved for future use
 *
 * This structure is used to store offsets for SST features in the register bank.
 * This is packed to the same format as defined in the specifications.
 */
struct feature_offset {
	u64 pp_offset :8;
	u64 bf_offset :8;
	u64 tf_offset :8;
	u64 reserved :40;
} __packed;

/**
 * struct levels_offset -	Offsets to each SST PP level
 * @sst_pp_level0_offset:	Qword offset to the register block of PP level 0
 * @sst_pp_level1_offset:	Qword offset to the register block of PP level 1
 * @sst_pp_level2_offset:	Qword offset to the register block of PP level 2
 * @sst_pp_level3_offset:	Qword offset to the register block of PP level 3
 * @sst_pp_level4_offset:	Qword offset to the register block of PP level 4
 * @reserved:			Reserved for future use
 *
 * This structure is used to store offsets of SST PP levels in the register bank.
 * This is packed to the same format as defined in the specifications.
 */
struct levels_offset {
	u64 sst_pp_level0_offset :8;
	u64 sst_pp_level1_offset :8;
	u64 sst_pp_level2_offset :8;
	u64 sst_pp_level3_offset :8;
	u64 sst_pp_level4_offset :8;
	u64 reserved :24;
} __packed;

/**
 * struct pp_control_offset -	Offsets for SST PP controls
 * @perf_level:		An SST-PP level that SW intends to switch to
 * @perf_level_lock:	SST-PP level select lock. 0 - unlocked. 1 - locked till next reset
 * @resvd0:		Reserved for future use
 * @current_state:	Bit mask to control the enable(1)/disable(0) state of each feature
 *			of the current PP level, bit 0 = BF, bit 1 = TF, bits 2-7 = reserved
 * @reserved:		Reserved for future use
 *
 * This structure is used to store offsets of SST PP controls in the register bank.
 * This is packed to the same format as defined in the specifications.
 */
struct pp_control_offset {
	u64 perf_level :3;
	u64 perf_level_lock :1;
	u64 resvd0 :4;
	u64 current_state :8;
	u64 reserved :48;
} __packed;

/**
 * struct pp_status_offset -	Offsets for SST PP status fields
 * @sst_pp_level:	Returns the current SST-PP level
 * @sst_pp_lock:	Returns the lock bit setting of perf_level_lock in pp_control_offset
 * @error_type:		Returns the last error of an SST-PP level change request. 0: no error,
 *			1: level change not allowed, others: reserved
 * @feature_state:	Bit mask to indicate the enable(1)/disable(0) state of each feature of the
 *			current PP level. bit 0 = BF, bit 1 = TF, bits 2-7 reserved
 * @reserved0:		Reserved for future use
 * @feature_error_type: Returns the last error of the specific feature. Three error_type bits per
 *			feature. i.e. ERROR_TYPE[2:0] for BF, ERROR_TYPE[5:3] for TF, etc.
 *			0x0: no error, 0x1: The specific feature is not supported by the hardware.
 *			0x2-0x6: Reserved. 0x7: feature state change is not allowed.
 * @reserved1:		Reserved for future use
 *
 * This structure is used to store offsets of SST PP status in the register bank.
 * This is packed to the same format as defined in the specifications.
 */
struct pp_status_offset {
	u64 sst_pp_level :3;
	u64 sst_pp_lock :1;
	u64 error_type :4;
	u64 feature_state :8;
	u64 reserved0 :16;
	u64 feature_error_type :24;
	u64 reserved1 :8;
} __packed;

/**
 * struct perf_level -	Used to store perf level and mmio offset
 * @mmio_offset:	mmio offset for a perf level
 * @level:		perf level for this offset
 *
 * This structure is used to store the final mmio offset of each perf level
 * from the SST base mmio offset.
 */
struct perf_level {
	int mmio_offset;
	int level;
};

/**
 * struct tpmi_per_power_domain_info -	Store per power_domain SST info
 * @package_id:		Package id for this power_domain
 * @power_domain_id:	Power domain id. Each entry from the SST-TPMI instance is a power_domain.
 * @max_level:		Max PP level possible for this power_domain
 * @ratio_unit:		Ratio unit for converting to MHz
 * @avx_levels:		Number of AVX levels
 * @pp_block_size:	Block size from PP header
 * @sst_header:		Store SST header for this power_domain
 * @cp_header:		Store SST-CP header for this power_domain
 * @pp_header:		Store SST-PP header for this power_domain
 * @perf_levels:	Pointer to each perf level to map level to mmio offset
 * @feature_offsets:	Store feature offsets for each PP-level
 * @control_offset:	Store the control offset for each PP-level
 * @status_offset:	Store the status offset for each PP-level
 * @sst_base:		Mapped SST base IO memory
 * @auxdev:		Auxiliary device instance that enumerated this instance
 * @saved_sst_cp_control: Saved SST-CP control configuration, restored on resume
 * @saved_clos_configs:	Saved SST-CP CLOS configuration, restored on resume
 * @saved_clos_assocs:	Saved SST-CP CLOS association, restored on resume
 * @saved_pp_control:	Saved SST-PP control information, restored on resume
 * @write_blocked:	Write operation is blocked, so can't change SST state
 *
 * This structure is used to store complete SST information for a power_domain. This information
 * is used to process read/write requests for any SST IOCTL. Each physical CPU package can have
 * multiple power_domains. Each power domain describes its own SST information and has its own
 * controls.
 */
struct tpmi_per_power_domain_info {
	int package_id;
	int power_domain_id;
	int max_level;
	int ratio_unit;
	int avx_levels;
	int pp_block_size;
	struct sst_header sst_header;
	struct cp_header cp_header;
	struct pp_header pp_header;
	struct perf_level *perf_levels;
	struct feature_offset feature_offsets;
	struct pp_control_offset control_offset;
	struct pp_status_offset status_offset;
	void __iomem *sst_base;
	struct auxiliary_device *auxdev;
	u64 saved_sst_cp_control;
	u64 saved_clos_configs[4];
	u64 saved_clos_assocs[4];
	u64 saved_pp_control;
	bool write_blocked;
};

/* Supported maximum partitions */
#define SST_MAX_PARTITIONS	2

/**
 * struct tpmi_sst_struct -	Store sst info for a package
 * @package_id:			Package id for this aux device instance
 * @number_of_power_domains:	Number of power_domains pointed to by power_domain_info pointer
 * @power_domain_info:		Pointer to power domains information
 * @cdie_mask:			Mask of compute dies present in a partition from hardware.
 *				This mask is not present in the version 1 information header.
 * @io_dies:			Number of IO dies in a partition. This will be 0 for TPMI
 *				version 1 information header.
 * @partition_mask:		Mask of all partitions.
 * @partition_mask_current:	Current partition mask as some may have been unbound.
 *
 * This structure is used to store full SST information for a package.
 * Each package has one or multiple OOB PCI devices. Each package can contain multiple
 * power domains.
 */
struct tpmi_sst_struct {
	int package_id;
	struct tpmi_per_power_domain_info *power_domain_info[SST_MAX_PARTITIONS];
	u16 cdie_mask[SST_MAX_PARTITIONS];
	u8 number_of_power_domains[SST_MAX_PARTITIONS];
	u8 io_dies[SST_MAX_PARTITIONS];
	u8 partition_mask;
	u8 partition_mask_current;
};

/**
 * struct tpmi_sst_common_struct -	Store all SST instances
 * @max_index:		Maximum package index currently present
 * @sst_inst:		Pointer to per package instance
 *
 * Stores every SST Package instance.
 */
struct tpmi_sst_common_struct {
	int max_index;
	struct tpmi_sst_struct **sst_inst;
};

/*
 * Each IOCTL request is processed under this lock. Also used to protect
 * registration functions and common data structures.
 */
static DEFINE_MUTEX(isst_tpmi_dev_lock);

/* Usage count to track the number of TPMI SST instances registered with this core. */
static int isst_core_usage_count;

/* Stores complete SST information for every package and power_domain */
static struct tpmi_sst_common_struct isst_common;

#define SST_MAX_AVX_LEVELS	3

#define SST_PP_OFFSET_0		8
#define SST_PP_OFFSET_1		16
#define SST_PP_OFFSET_SIZE	8

static int sst_add_perf_profiles(struct auxiliary_device *auxdev,
				 struct tpmi_per_power_domain_info *pd_info,
				 int levels)
{
	struct device *dev = &auxdev->dev;
	u64 perf_level_offsets;
	int i;

	pd_info->perf_levels = devm_kcalloc(dev, levels, sizeof(struct perf_level), GFP_KERNEL);
	if (!pd_info->perf_levels)
		return 0;

	pd_info->ratio_unit = pd_info->pp_header.ratio_unit;
	pd_info->avx_levels = SST_MAX_AVX_LEVELS;
	pd_info->pp_block_size = pd_info->pp_header.block_size;

	/* Read PP Offset 0: Get feature offset with PP level */
	*((u64 *)&pd_info->feature_offsets) = readq(pd_info->sst_base +
						    pd_info->sst_header.pp_offset +
						    SST_PP_OFFSET_0);

	perf_level_offsets = readq(pd_info->sst_base + pd_info->sst_header.pp_offset +
				   SST_PP_OFFSET_1);

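	/*
	 * Each perf level's offset is an 8-bit qword count packed into
	 * perf_level_offsets. For example, a value of 0x2010 places level 0
	 * at qword 0x10 (byte 0x80) and level 1 at qword 0x20 (byte 0x100),
	 * each relative to pp_offset.
	 */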
	for (i = 0; i < levels; ++i) {
		u64 offset;

		offset = perf_level_offsets & (0xffULL << (i * SST_PP_OFFSET_SIZE));
		offset >>= (i * 8);
		offset &= 0xff;
		offset *= 8; /* Convert to byte from QWORD offset */
		pd_info->perf_levels[i].mmio_offset = pd_info->sst_header.pp_offset + offset;
	}

	return 0;
}

static int sst_main(struct auxiliary_device *auxdev, struct tpmi_per_power_domain_info *pd_info)
{
	struct device *dev = &auxdev->dev;
	int i, mask, levels;

	*((u64 *)&pd_info->sst_header) = readq(pd_info->sst_base);
	pd_info->sst_header.cp_offset *= 8;
	pd_info->sst_header.pp_offset *= 8;

	if (pd_info->sst_header.interface_version == TPMI_VERSION_INVALID)
		return -ENODEV;

	if (TPMI_MAJOR_VERSION(pd_info->sst_header.interface_version) != ISST_MAJOR_VERSION) {
		dev_err(dev, "SST: Unsupported major version:%lx\n",
			TPMI_MAJOR_VERSION(pd_info->sst_header.interface_version));
		return -ENODEV;
	}

	if (TPMI_MINOR_VERSION(pd_info->sst_header.interface_version) != ISST_MINOR_VERSION)
		dev_info(dev, "SST: Ignore: Unsupported minor version:%lx\n",
			 TPMI_MINOR_VERSION(pd_info->sst_header.interface_version));

	/* Read SST CP Header */
	*((u64 *)&pd_info->cp_header) = readq(pd_info->sst_base + pd_info->sst_header.cp_offset);

	/* Read PP header */
	*((u64 *)&pd_info->pp_header) = readq(pd_info->sst_base + pd_info->sst_header.pp_offset);

	mask = 0x01;
	levels = 0;
	for (i = 0; i < 8; ++i) {
		if (pd_info->pp_header.level_en_mask & mask)
			levels = i;
		mask <<= 1;
	}
	pd_info->max_level = levels;
	sst_add_perf_profiles(auxdev, pd_info, levels + 1);

	return 0;
}

static u8 isst_instance_count(struct tpmi_sst_struct *sst_inst)
{
	u8 i, max_part, count = 0;

	/* Partition mask starts from bit 0 and contains 1s only */
	max_part = hweight8(sst_inst->partition_mask);
	for (i = 0; i < max_part; i++)
		count += sst_inst->number_of_power_domains[i];

	return count;
}

/**
 * map_cdies() - Map user domain ID to compute domain ID
 * @sst_inst: TPMI Instance
 * @id: User domain ID
 * @partition: Resolved partition
 *
 * Helper function for map_partition_power_domain_id() to resolve the compute
 * domain ID and partition. Uses the hardware provided cdie_mask for a
 * partition as is to resolve a compute domain ID.
 *
 * Return: %-EINVAL on error, otherwise mapped domain ID >= 0.
 */
static int map_cdies(struct tpmi_sst_struct *sst_inst, u8 id, u8 *partition)
{
	u8 i, max_part;

	max_part = hweight8(sst_inst->partition_mask);
	for (i = 0; i < max_part; i++) {
		if (!(sst_inst->cdie_mask[i] & BIT(id)))
			continue;

		*partition = i;
		return id - ffs(sst_inst->cdie_mask[i]) + 1;
	}

	return -EINVAL;
}

/**
 * map_partition_power_domain_id() - Map user domain ID to partition domain ID
 * @sst_inst: TPMI Instance
 * @id: User domain ID
 * @partition: Resolved partition
 *
 * In a partitioned system a CPU package has two separate MMIO ranges (under
 * two PCI devices). But the CPU package compute die/power domain IDs are
 * unique in a package. User space can get the compute die/power domain ID
 * from CPUID and MSR 0x54 for a CPU. So those IDs need to be preserved even
 * if they are spread across two different partitions, each with its own
 * ordering.
 *
 * For example for command ISST_IF_COUNT_TPMI_INSTANCES, the valid_mask
 * is 111111b for a system with 4 compute and 2 IO dies. This is presented
 * as provided by the hardware in a non-partitioned system with the
 * following order:
 *	I1-I0-C3-C2-C1-C0
 * Here "C" stands for a compute die and "I" for an IO die.
 * Compute dies are always present first in TPMI instances, as they have
 * to map to the real power domain/die ID of a system. In a non-partitioned
 * system there is no way to identify compute and IO die boundaries from
 * this driver without reading each CPU's mapping.
 *
 * The same order needs to be preserved, even if those compute dies are
 * distributed among multiple partitions. For example:
 * Partition 0 can contain: I0-C1-C0
 * Partition 1 can contain: I1-C3-C2
 *
 * This will require a conversion of user space IDs to the actual index into
 * the array of stored power domains for each partition. For the above example
 * this function will return partition and index as follows:
 *
 * =============	=========	=====	========
 * User space ID	Partition	Index	Die type
 * =============	=========	=====	========
 * 0			0		0	Compute
 * 1			0		1	Compute
 * 2			1		0	Compute
 * 3			1		1	Compute
 * 4			0		2	IO
 * 5			1		2	IO
 * =============	=========	=====	========
 *
 * Return: %-EINVAL on error, otherwise mapped domain ID >= 0.
 */
static int map_partition_power_domain_id(struct tpmi_sst_struct *sst_inst, u8 id, u8 *partition)
{
	u8 i, io_start_id, max_part;

	*partition = 0;

	/* If any PCI device for a partition is unbound, treat this as failure */
	if (sst_inst->partition_mask != sst_inst->partition_mask_current)
		return -EINVAL;

	max_part = hweight8(sst_inst->partition_mask);

	/* IO indexes begin here */
	io_start_id = fls(sst_inst->cdie_mask[max_part - 1]);

	if (id < io_start_id)
		return map_cdies(sst_inst, id, partition);

	for (i = 0; i < max_part; i++) {
		u8 io_id;

		io_id = id - io_start_id;
		if (io_id < sst_inst->io_dies[i]) {
			u8 cdie_range;

			cdie_range = fls(sst_inst->cdie_mask[i]) - ffs(sst_inst->cdie_mask[i]) + 1;
			*partition = i;
			return cdie_range + io_id;
		}
		io_start_id += sst_inst->io_dies[i];
	}

	return -EINVAL;
}

/*
 * Map a package and power_domain id to the SST information structure unique
 * for a power_domain. Must be called with isst_tpmi_dev_lock held.
 */
static struct tpmi_per_power_domain_info *get_instance(int pkg_id, int power_domain_id)
{
	struct tpmi_per_power_domain_info *power_domain_info;
	struct tpmi_sst_struct *sst_inst;
	u8 part;

	if (!in_range(pkg_id, 0, topology_max_packages()) || pkg_id > isst_common.max_index)
		return NULL;

	sst_inst = isst_common.sst_inst[pkg_id];
	if (!sst_inst)
		return NULL;

	power_domain_id = map_partition_power_domain_id(sst_inst, power_domain_id, &part);
	if (power_domain_id < 0)
		return NULL;

	power_domain_info = &sst_inst->power_domain_info[part][power_domain_id];

	if (power_domain_info && !power_domain_info->sst_base)
		return NULL;

	return power_domain_info;
}

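/*
 * Dynamic SST features can only be changed when hardware-managed P-states
 * (HWP) are in use: MSR_PM_ENABLE bit 0 is the HWP enable bit, so the
 * helper below reports "disabled" when that bit reads back as 0.
 */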
static bool disable_dynamic_sst_features(void)
{
	u64 value;

	rdmsrl(MSR_PM_ENABLE, value);
	return !(value & 0x1);
}

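/*
 * Accessor macros for SST-CP register fields: read the 64-bit register at
 * cp_offset + offset, isolate bits [start, start + width - 1] and scale by
 * mult_factor; the write variant is a read-modify-write of the same field.
 * The name_str argument is unused by the macros and only documents the
 * field at call sites. Both expand inside functions where a local
 * power_domain_info pointer is in scope.
 */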
#define _read_cp_info(name_str, name, offset, start, width, mult_factor)\
{\
	u64 val, mask;\
	\
	val = readq(power_domain_info->sst_base + power_domain_info->sst_header.cp_offset +\
			(offset));\
	mask = GENMASK_ULL((start + width - 1), start);\
	val &= mask;\
	val >>= start;\
	name = (val * mult_factor);\
}

#define _write_cp_info(name_str, name, offset, start, width, div_factor)\
{\
	u64 val, mask;\
	\
	val = readq(power_domain_info->sst_base +\
		    power_domain_info->sst_header.cp_offset + (offset));\
	mask = GENMASK_ULL((start + width - 1), start);\
	val &= ~mask;\
	val |= (name / div_factor) << start;\
	writeq(val, power_domain_info->sst_base + power_domain_info->sst_header.cp_offset +\
		(offset));\
}

#define	SST_CP_CONTROL_OFFSET	8
#define	SST_CP_STATUS_OFFSET	16

#define SST_CP_ENABLE_START		0
#define SST_CP_ENABLE_WIDTH		1

#define SST_CP_PRIORITY_TYPE_START	1
#define SST_CP_PRIORITY_TYPE_WIDTH	1

static long isst_if_core_power_state(void __user *argp)
{
	struct tpmi_per_power_domain_info *power_domain_info;
	struct isst_core_power core_power;

	if (copy_from_user(&core_power, argp, sizeof(core_power)))
		return -EFAULT;

	if (core_power.get_set && disable_dynamic_sst_features())
		return -EFAULT;

	power_domain_info = get_instance(core_power.socket_id, core_power.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (core_power.get_set) {
		_write_cp_info("cp_enable", core_power.enable, SST_CP_CONTROL_OFFSET,
			       SST_CP_ENABLE_START, SST_CP_ENABLE_WIDTH, SST_MUL_FACTOR_NONE)
		_write_cp_info("cp_prio_type", core_power.priority_type, SST_CP_CONTROL_OFFSET,
			       SST_CP_PRIORITY_TYPE_START, SST_CP_PRIORITY_TYPE_WIDTH,
			       SST_MUL_FACTOR_NONE)
	} else {
		/* get */
		_read_cp_info("cp_enable", core_power.enable, SST_CP_STATUS_OFFSET,
			      SST_CP_ENABLE_START, SST_CP_ENABLE_WIDTH, SST_MUL_FACTOR_NONE)
		_read_cp_info("cp_prio_type", core_power.priority_type, SST_CP_STATUS_OFFSET,
			      SST_CP_PRIORITY_TYPE_START, SST_CP_PRIORITY_TYPE_WIDTH,
			      SST_MUL_FACTOR_NONE)
		core_power.supported = !!(power_domain_info->sst_header.cap_mask & BIT(0));
		if (copy_to_user(argp, &core_power, sizeof(core_power)))
			return -EFAULT;
	}

	return 0;
}

#define SST_CLOS_CONFIG_0_OFFSET	24

#define SST_CLOS_CONFIG_PRIO_START	4
#define SST_CLOS_CONFIG_PRIO_WIDTH	4

#define SST_CLOS_CONFIG_MIN_START	8
#define SST_CLOS_CONFIG_MIN_WIDTH	8

#define SST_CLOS_CONFIG_MAX_START	16
#define SST_CLOS_CONFIG_MAX_WIDTH	8

static long isst_if_clos_param(void __user *argp)
{
	struct tpmi_per_power_domain_info *power_domain_info;
	struct isst_clos_param clos_param;

	if (copy_from_user(&clos_param, argp, sizeof(clos_param)))
		return -EFAULT;

	power_domain_info = get_instance(clos_param.socket_id, clos_param.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (clos_param.get_set) {
		if (power_domain_info->write_blocked)
			return -EPERM;

		_write_cp_info("clos.min_freq", clos_param.min_freq_mhz,
			       (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
			       SST_CLOS_CONFIG_MIN_START, SST_CLOS_CONFIG_MIN_WIDTH,
			       SST_MUL_FACTOR_FREQ);
		_write_cp_info("clos.max_freq", clos_param.max_freq_mhz,
			       (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
			       SST_CLOS_CONFIG_MAX_START, SST_CLOS_CONFIG_MAX_WIDTH,
			       SST_MUL_FACTOR_FREQ);
		_write_cp_info("clos.prio", clos_param.prop_prio,
			       (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
			       SST_CLOS_CONFIG_PRIO_START, SST_CLOS_CONFIG_PRIO_WIDTH,
			       SST_MUL_FACTOR_NONE);
	} else {
		/* get */
		_read_cp_info("clos.min_freq", clos_param.min_freq_mhz,
				(SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
				SST_CLOS_CONFIG_MIN_START, SST_CLOS_CONFIG_MIN_WIDTH,
				SST_MUL_FACTOR_FREQ)
		_read_cp_info("clos.max_freq", clos_param.max_freq_mhz,
				(SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
				SST_CLOS_CONFIG_MAX_START, SST_CLOS_CONFIG_MAX_WIDTH,
				SST_MUL_FACTOR_FREQ)
		_read_cp_info("clos.prio", clos_param.prop_prio,
				(SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
				SST_CLOS_CONFIG_PRIO_START, SST_CLOS_CONFIG_PRIO_WIDTH,
				SST_MUL_FACTOR_NONE)

		if (copy_to_user(argp, &clos_param, sizeof(clos_param)))
			return -EFAULT;
	}

	return 0;
}

#define SST_CLOS_ASSOC_0_OFFSET		56
#define SST_CLOS_ASSOC_CPUS_PER_REG	16
#define SST_CLOS_ASSOC_BITS_PER_CPU	4

static long isst_if_clos_assoc(void __user *argp)
{
	struct isst_if_clos_assoc_cmds assoc_cmds;
	unsigned char __user *ptr;
	int i;

	/* Each multi command has u16 command count as the first field */
	if (copy_from_user(&assoc_cmds, argp, sizeof(assoc_cmds)))
		return -EFAULT;

	if (!assoc_cmds.cmd_count || assoc_cmds.cmd_count > ISST_IF_CMD_LIMIT)
		return -EINVAL;

	ptr = argp + offsetof(struct isst_if_clos_assoc_cmds, assoc_info);
	for (i = 0; i < assoc_cmds.cmd_count; ++i) {
		struct tpmi_per_power_domain_info *power_domain_info;
		struct isst_if_clos_assoc clos_assoc;
		int punit_id, punit_cpu_no, pkg_id;
		struct tpmi_sst_struct *sst_inst;
		int offset, shift, cpu;
		u64 val, mask, clos;
		u8 part;

		if (copy_from_user(&clos_assoc, ptr, sizeof(clos_assoc)))
			return -EFAULT;

		if (clos_assoc.socket_id > topology_max_packages())
			return -EINVAL;

		cpu = clos_assoc.logical_cpu;
		clos = clos_assoc.clos;

		if (assoc_cmds.punit_cpu_map)
			punit_cpu_no = cpu;
		else
			return -EOPNOTSUPP;

		if (punit_cpu_no < 0)
			return -EINVAL;

		punit_id = clos_assoc.power_domain_id;
		pkg_id = clos_assoc.socket_id;

		sst_inst = isst_common.sst_inst[pkg_id];

		punit_id = map_partition_power_domain_id(sst_inst, punit_id, &part);
		if (punit_id < 0)
			return -EINVAL;

		power_domain_info = &sst_inst->power_domain_info[part][punit_id];

		if (assoc_cmds.get_set && power_domain_info->write_blocked)
			return -EPERM;

		offset = SST_CLOS_ASSOC_0_OFFSET +
				(punit_cpu_no / SST_CLOS_ASSOC_CPUS_PER_REG) * SST_REG_SIZE;
		shift = punit_cpu_no % SST_CLOS_ASSOC_CPUS_PER_REG;
		shift *= SST_CLOS_ASSOC_BITS_PER_CPU;
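		/*
		 * Example: punit CPU 37 lands in assoc register 37 / 16 = 2
		 * (offset 56 + 2 * 8 = 72) with bit shift (37 % 16) * 4 = 20.
		 */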

		val = readq(power_domain_info->sst_base +
				power_domain_info->sst_header.cp_offset + offset);
		if (assoc_cmds.get_set) {
			mask = GENMASK_ULL((shift + SST_CLOS_ASSOC_BITS_PER_CPU - 1), shift);
			val &= ~mask;
			val |= (clos << shift);
			writeq(val, power_domain_info->sst_base +
					power_domain_info->sst_header.cp_offset + offset);
		} else {
			val >>= shift;
			clos_assoc.clos = val & GENMASK(SST_CLOS_ASSOC_BITS_PER_CPU - 1, 0);
			if (copy_to_user(ptr, &clos_assoc, sizeof(clos_assoc)))
				return -EFAULT;
		}

		ptr += sizeof(clos_assoc);
	}

	return 0;
}

#define _read_pp_info(name_str, name, offset, start, width, mult_factor)\
{\
	u64 val, _mask;\
	\
	val = readq(power_domain_info->sst_base + power_domain_info->sst_header.pp_offset +\
		    (offset));\
	_mask = GENMASK_ULL((start + width - 1), start);\
	val &= _mask;\
	val >>= start;\
	name = (val * mult_factor);\
}

#define _write_pp_info(name_str, name, offset, start, width, div_factor)\
{\
	u64 val, _mask;\
	\
	val = readq(power_domain_info->sst_base + power_domain_info->sst_header.pp_offset +\
		    (offset));\
	_mask = GENMASK((start + width - 1), start);\
	val &= ~_mask;\
	val |= (name / div_factor) << start;\
	writeq(val, power_domain_info->sst_base + power_domain_info->sst_header.pp_offset +\
	      (offset));\
}

#define _read_bf_level_info(name_str, name, level, offset, start, width, mult_factor)\
{\
	u64 val, _mask;\
	\
	val = readq(power_domain_info->sst_base +\
		    power_domain_info->perf_levels[level].mmio_offset +\
		(power_domain_info->feature_offsets.bf_offset * 8) + (offset));\
	_mask = GENMASK_ULL((start + width - 1), start);\
	val &= _mask;\
	val >>= start;\
	name = (val * mult_factor);\
}

#define _read_tf_level_info(name_str, name, level, offset, start, width, mult_factor)\
{\
	u64 val, _mask;\
	\
	val = readq(power_domain_info->sst_base +\
		    power_domain_info->perf_levels[level].mmio_offset +\
		(power_domain_info->feature_offsets.tf_offset * 8) + (offset));\
	_mask = GENMASK_ULL((start + width - 1), start);\
	val &= _mask;\
	val >>= start;\
	name = (val * mult_factor);\
}

#define SST_PP_STATUS_OFFSET	32

#define SST_PP_LEVEL_START	0
#define SST_PP_LEVEL_WIDTH	3

#define SST_PP_LOCK_START	3
#define SST_PP_LOCK_WIDTH	1

#define SST_PP_FEATURE_STATE_START	8
#define SST_PP_FEATURE_STATE_WIDTH	8

#define SST_BF_FEATURE_SUPPORTED_START	12
#define SST_BF_FEATURE_SUPPORTED_WIDTH	1

#define SST_TF_FEATURE_SUPPORTED_START	12
#define SST_TF_FEATURE_SUPPORTED_WIDTH	1
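
/*
 * Note: the BF and TF "supported" fields both live at bit 12 of qword 0,
 * but of different register blocks: _read_bf_level_info() indexes through
 * bf_offset while _read_tf_level_info() uses tf_offset, so the two reads
 * target different registers.
 */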

static int isst_if_get_perf_level(void __user *argp)
{
	struct isst_perf_level_info perf_level;
	struct tpmi_per_power_domain_info *power_domain_info;
	unsigned long level_mask;
	u8 level, support;

	if (copy_from_user(&perf_level, argp, sizeof(perf_level)))
		return -EFAULT;

	power_domain_info = get_instance(perf_level.socket_id, perf_level.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	perf_level.max_level = power_domain_info->max_level;
	perf_level.level_mask = power_domain_info->pp_header.level_en_mask;
	perf_level.feature_rev = power_domain_info->pp_header.feature_rev;
	_read_pp_info("current_level", perf_level.current_level, SST_PP_STATUS_OFFSET,
		      SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE)
	_read_pp_info("locked", perf_level.locked, SST_PP_STATUS_OFFSET,
		      SST_PP_LOCK_START, SST_PP_LOCK_WIDTH, SST_MUL_FACTOR_NONE)
	_read_pp_info("feature_state", perf_level.feature_state, SST_PP_STATUS_OFFSET,
		      SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH, SST_MUL_FACTOR_NONE)
	perf_level.enabled = !!(power_domain_info->sst_header.cap_mask & BIT(1));

	level_mask = perf_level.level_mask;
	perf_level.sst_bf_support = 0;
	for_each_set_bit(level, &level_mask, BITS_PER_BYTE) {
		/*
		 * Read BF support for a level. Read output is updated
		 * to the "support" variable by the below macro.
		 */
		_read_bf_level_info("bf_support", support, level, 0, SST_BF_FEATURE_SUPPORTED_START,
				    SST_BF_FEATURE_SUPPORTED_WIDTH, SST_MUL_FACTOR_NONE);

		/* If supported, set the bit for the level */
		if (support)
			perf_level.sst_bf_support |= BIT(level);
	}

	perf_level.sst_tf_support = 0;
	for_each_set_bit(level, &level_mask, BITS_PER_BYTE) {
		/*
		 * Read TF support for a level. Read output is updated
		 * to the "support" variable by the below macro.
		 */
		_read_tf_level_info("tf_support", support, level, 0, SST_TF_FEATURE_SUPPORTED_START,
				    SST_TF_FEATURE_SUPPORTED_WIDTH, SST_MUL_FACTOR_NONE);

		/* If supported, set the bit for the level */
		if (support)
			perf_level.sst_tf_support |= BIT(level);
	}

	if (copy_to_user(argp, &perf_level, sizeof(perf_level)))
		return -EFAULT;

	return 0;
}

#define SST_PP_CONTROL_OFFSET		24
#define SST_PP_LEVEL_CHANGE_TIME_MS	5
#define SST_PP_LEVEL_CHANGE_RETRY_COUNT	3
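
/*
 * Worst case, the level-change poll below waits 5 ms per attempt for up to
 * four attempts (initial try plus 3 retries), i.e. ~20 ms, before giving
 * up with -EFAULT.
 */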

static int isst_if_set_perf_level(void __user *argp)
{
	struct isst_perf_level_control perf_level;
	struct tpmi_per_power_domain_info *power_domain_info;
	int level, retry = 0;

	if (disable_dynamic_sst_features())
		return -EFAULT;

	if (copy_from_user(&perf_level, argp, sizeof(perf_level)))
		return -EFAULT;

	power_domain_info = get_instance(perf_level.socket_id, perf_level.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (power_domain_info->write_blocked)
		return -EPERM;

	if (!(power_domain_info->pp_header.allowed_level_mask & BIT(perf_level.level)))
		return -EINVAL;

	_read_pp_info("current_level", level, SST_PP_STATUS_OFFSET,
		      SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE)

	/* If the requested new level is the same as the current level, reject */
	if (perf_level.level == level)
		return -EINVAL;

	_write_pp_info("perf_level", perf_level.level, SST_PP_CONTROL_OFFSET,
		       SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE)

	/* It is possible that firmware is busy (although unlikely), so retry */
	do {
		/* Give the FW time to process */
		msleep(SST_PP_LEVEL_CHANGE_TIME_MS);

		_read_pp_info("current_level", level, SST_PP_STATUS_OFFSET,
			      SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE)

		/* Check if the new level is active */
		if (perf_level.level == level)
			break;

	} while (retry++ < SST_PP_LEVEL_CHANGE_RETRY_COUNT);

	/* If the level change didn't happen, return fault */
	if (perf_level.level != level)
		return -EFAULT;

	/* Reset the feature state on level change */
	_write_pp_info("perf_feature", 0, SST_PP_CONTROL_OFFSET,
		       SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH,
		       SST_MUL_FACTOR_NONE)

	/* Give the FW time to process */
	msleep(SST_PP_LEVEL_CHANGE_TIME_MS);

	return 0;
}

static int isst_if_set_perf_feature(void __user *argp)
{
	struct isst_perf_feature_control perf_feature;
	struct tpmi_per_power_domain_info *power_domain_info;

	if (disable_dynamic_sst_features())
		return -EFAULT;

	if (copy_from_user(&perf_feature, argp, sizeof(perf_feature)))
		return -EFAULT;

	power_domain_info = get_instance(perf_feature.socket_id, perf_feature.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (power_domain_info->write_blocked)
		return -EPERM;

	_write_pp_info("perf_feature", perf_feature.feature, SST_PP_CONTROL_OFFSET,
		       SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH,
		       SST_MUL_FACTOR_NONE)

	return 0;
}

#define _read_pp_level_info(name_str, name, level, offset, start, width, mult_factor)\
{\
	u64 val, _mask;\
	\
	val = readq(power_domain_info->sst_base +\
		    power_domain_info->perf_levels[level].mmio_offset +\
		(power_domain_info->feature_offsets.pp_offset * 8) + (offset));\
	_mask = GENMASK_ULL((start + width - 1), start);\
	val &= _mask;\
	val >>= start;\
	name = (val * mult_factor);\
}

#define SST_PP_INFO_0_OFFSET	0
#define SST_PP_INFO_1_OFFSET	8
#define SST_PP_INFO_2_OFFSET	16
#define SST_PP_INFO_3_OFFSET	24

/* SST_PP_INFO_4_OFFSET to SST_PP_INFO_9_OFFSET are trl levels */
#define SST_PP_INFO_4_OFFSET	32

#define SST_PP_INFO_10_OFFSET	80
#define SST_PP_INFO_11_OFFSET	88

#define SST_PP_P1_SSE_START	0
#define SST_PP_P1_SSE_WIDTH	8

#define SST_PP_P1_AVX2_START	8
#define SST_PP_P1_AVX2_WIDTH	8

#define SST_PP_P1_AVX512_START	16
#define SST_PP_P1_AVX512_WIDTH	8

#define SST_PP_P1_AMX_START	24
#define SST_PP_P1_AMX_WIDTH	8

#define SST_PP_TDP_START	32
#define SST_PP_TDP_WIDTH	15

#define SST_PP_T_PROCHOT_START	47
#define SST_PP_T_PROCHOT_WIDTH	8

#define SST_PP_MAX_MEMORY_FREQ_START	55
#define SST_PP_MAX_MEMORY_FREQ_WIDTH	7

#define SST_PP_COOLING_TYPE_START	62
#define SST_PP_COOLING_TYPE_WIDTH	2

#define SST_PP_TRL_0_RATIO_0_START	0
#define SST_PP_TRL_0_RATIO_0_WIDTH	8

#define SST_PP_TRL_CORES_BUCKET_0_START	0
#define SST_PP_TRL_CORES_BUCKET_0_WIDTH	8

#define SST_PP_CORE_RATIO_P0_START	0
#define SST_PP_CORE_RATIO_P0_WIDTH	8

#define SST_PP_CORE_RATIO_P1_START	8
#define SST_PP_CORE_RATIO_P1_WIDTH	8

#define SST_PP_CORE_RATIO_PN_START	16
#define SST_PP_CORE_RATIO_PN_WIDTH	8

#define SST_PP_CORE_RATIO_PM_START	24
#define SST_PP_CORE_RATIO_PM_WIDTH	8

#define SST_PP_CORE_RATIO_P0_FABRIC_START	32
#define SST_PP_CORE_RATIO_P0_FABRIC_WIDTH	8

#define SST_PP_CORE_RATIO_P1_FABRIC_START	40
#define SST_PP_CORE_RATIO_P1_FABRIC_WIDTH	8

#define SST_PP_CORE_RATIO_PM_FABRIC_START	48
#define SST_PP_CORE_RATIO_PM_FABRIC_WIDTH	8

static int isst_if_get_perf_level_info(void __user *argp)
{
	struct isst_perf_level_data_info perf_level;
	struct tpmi_per_power_domain_info *power_domain_info;
	int i, j;

	if (copy_from_user(&perf_level, argp, sizeof(perf_level)))
		return -EFAULT;

	power_domain_info = get_instance(perf_level.socket_id, perf_level.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (perf_level.level > power_domain_info->max_level)
		return -EINVAL;

	if (!(power_domain_info->pp_header.level_en_mask & BIT(perf_level.level)))
		return -EINVAL;

	_read_pp_level_info("tdp_ratio", perf_level.tdp_ratio, perf_level.level,
			    SST_PP_INFO_0_OFFSET, SST_PP_P1_SSE_START, SST_PP_P1_SSE_WIDTH,
			    SST_MUL_FACTOR_NONE)
	_read_pp_level_info("base_freq_mhz", perf_level.base_freq_mhz, perf_level.level,
			    SST_PP_INFO_0_OFFSET, SST_PP_P1_SSE_START, SST_PP_P1_SSE_WIDTH,
			    SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("base_freq_avx2_mhz", perf_level.base_freq_avx2_mhz, perf_level.level,
			    SST_PP_INFO_0_OFFSET, SST_PP_P1_AVX2_START, SST_PP_P1_AVX2_WIDTH,
			    SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("base_freq_avx512_mhz", perf_level.base_freq_avx512_mhz,
			    perf_level.level, SST_PP_INFO_0_OFFSET, SST_PP_P1_AVX512_START,
			    SST_PP_P1_AVX512_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("base_freq_amx_mhz", perf_level.base_freq_amx_mhz, perf_level.level,
			    SST_PP_INFO_0_OFFSET, SST_PP_P1_AMX_START, SST_PP_P1_AMX_WIDTH,
			    SST_MUL_FACTOR_FREQ)

	_read_pp_level_info("thermal_design_power_w", perf_level.thermal_design_power_w,
			    perf_level.level, SST_PP_INFO_1_OFFSET, SST_PP_TDP_START,
			    SST_PP_TDP_WIDTH, SST_MUL_FACTOR_NONE)
	perf_level.thermal_design_power_w /= 8; /* units are in 1/8th watt */
	_read_pp_level_info("tjunction_max_c", perf_level.tjunction_max_c, perf_level.level,
			    SST_PP_INFO_1_OFFSET, SST_PP_T_PROCHOT_START, SST_PP_T_PROCHOT_WIDTH,
			    SST_MUL_FACTOR_NONE)
	_read_pp_level_info("max_memory_freq_mhz", perf_level.max_memory_freq_mhz,
			    perf_level.level, SST_PP_INFO_1_OFFSET, SST_PP_MAX_MEMORY_FREQ_START,
			    SST_PP_MAX_MEMORY_FREQ_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("cooling_type", perf_level.cooling_type, perf_level.level,
			    SST_PP_INFO_1_OFFSET, SST_PP_COOLING_TYPE_START,
			    SST_PP_COOLING_TYPE_WIDTH, SST_MUL_FACTOR_NONE)

	for (i = 0; i < TRL_MAX_LEVELS; ++i) {
		for (j = 0; j < TRL_MAX_BUCKETS; ++j)
			_read_pp_level_info("trl*_bucket*_freq_mhz",
					    perf_level.trl_freq_mhz[i][j], perf_level.level,
					    SST_PP_INFO_4_OFFSET + (i * SST_PP_TRL_0_RATIO_0_WIDTH),
					    j * SST_PP_TRL_0_RATIO_0_WIDTH,
					    SST_PP_TRL_0_RATIO_0_WIDTH,
					    SST_MUL_FACTOR_FREQ);
	}

	for (i = 0; i < TRL_MAX_BUCKETS; ++i)
		_read_pp_level_info("bucket*_core_count", perf_level.bucket_core_counts[i],
				    perf_level.level, SST_PP_INFO_10_OFFSET,
				    SST_PP_TRL_CORES_BUCKET_0_WIDTH * i,
				    SST_PP_TRL_CORES_BUCKET_0_WIDTH, SST_MUL_FACTOR_NONE)

	perf_level.max_buckets = TRL_MAX_BUCKETS;
	perf_level.max_trl_levels = TRL_MAX_LEVELS;

	_read_pp_level_info("p0_freq_mhz", perf_level.p0_freq_mhz, perf_level.level,
			    SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_P0_START,
			    SST_PP_CORE_RATIO_P0_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("p1_freq_mhz", perf_level.p1_freq_mhz, perf_level.level,
			    SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_P1_START,
			    SST_PP_CORE_RATIO_P1_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("pn_freq_mhz", perf_level.pn_freq_mhz, perf_level.level,
			    SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_PN_START,
			    SST_PP_CORE_RATIO_PN_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("pm_freq_mhz", perf_level.pm_freq_mhz, perf_level.level,
			    SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_PM_START,
			    SST_PP_CORE_RATIO_PM_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("p0_fabric_freq_mhz", perf_level.p0_fabric_freq_mhz,
			    perf_level.level, SST_PP_INFO_11_OFFSET,
			    SST_PP_CORE_RATIO_P0_FABRIC_START,
			    SST_PP_CORE_RATIO_P0_FABRIC_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("p1_fabric_freq_mhz", perf_level.p1_fabric_freq_mhz,
			    perf_level.level, SST_PP_INFO_11_OFFSET,
			    SST_PP_CORE_RATIO_P1_FABRIC_START,
			    SST_PP_CORE_RATIO_P1_FABRIC_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("pm_fabric_freq_mhz", perf_level.pm_fabric_freq_mhz,
			    perf_level.level, SST_PP_INFO_11_OFFSET,
			    SST_PP_CORE_RATIO_PM_FABRIC_START,
			    SST_PP_CORE_RATIO_PM_FABRIC_WIDTH, SST_MUL_FACTOR_FREQ)

	if (copy_to_user(argp, &perf_level, sizeof(perf_level)))
		return -EFAULT;

	return 0;
}

#define SST_PP_FUSED_CORE_COUNT_START	0
#define SST_PP_FUSED_CORE_COUNT_WIDTH	8

#define SST_PP_RSLVD_CORE_COUNT_START	8
#define SST_PP_RSLVD_CORE_COUNT_WIDTH	8

#define SST_PP_RSLVD_CORE_MASK_START	0
#define SST_PP_RSLVD_CORE_MASK_WIDTH	64

static int isst_if_get_perf_level_mask(void __user *argp)
{
	static struct isst_perf_level_cpu_mask cpumask;
	struct tpmi_per_power_domain_info *power_domain_info;
	u64 mask;

	if (copy_from_user(&cpumask, argp, sizeof(cpumask)))
		return -EFAULT;

	power_domain_info = get_instance(cpumask.socket_id, cpumask.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	_read_pp_level_info("mask", mask, cpumask.level, SST_PP_INFO_2_OFFSET,
			    SST_PP_RSLVD_CORE_MASK_START, SST_PP_RSLVD_CORE_MASK_WIDTH,
			    SST_MUL_FACTOR_NONE)

	cpumask.mask = mask;

	if (!cpumask.punit_cpu_map)
		return -EOPNOTSUPP;

	if (copy_to_user(argp, &cpumask, sizeof(cpumask)))
		return -EFAULT;

	return 0;
}

#define SST_BF_INFO_0_OFFSET	0
#define SST_BF_INFO_1_OFFSET	8

#define SST_BF_P1_HIGH_START	13
#define SST_BF_P1_HIGH_WIDTH	8

#define SST_BF_P1_LOW_START	21
#define SST_BF_P1_LOW_WIDTH	8

#define SST_BF_T_PROHOT_START	38
#define SST_BF_T_PROHOT_WIDTH	8

#define SST_BF_TDP_START	46
#define SST_BF_TDP_WIDTH	15

static int isst_if_get_base_freq_info(void __user *argp)
{
	static struct isst_base_freq_info base_freq;
	struct tpmi_per_power_domain_info *power_domain_info;

	if (copy_from_user(&base_freq, argp, sizeof(base_freq)))
		return -EFAULT;

	power_domain_info = get_instance(base_freq.socket_id, base_freq.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (base_freq.level > power_domain_info->max_level)
		return -EINVAL;

	_read_bf_level_info("p1_high", base_freq.high_base_freq_mhz, base_freq.level,
			    SST_BF_INFO_0_OFFSET, SST_BF_P1_HIGH_START, SST_BF_P1_HIGH_WIDTH,
			    SST_MUL_FACTOR_FREQ)
	_read_bf_level_info("p1_low", base_freq.low_base_freq_mhz, base_freq.level,
			    SST_BF_INFO_0_OFFSET, SST_BF_P1_LOW_START, SST_BF_P1_LOW_WIDTH,
			    SST_MUL_FACTOR_FREQ)
	_read_bf_level_info("BF-TJ", base_freq.tjunction_max_c, base_freq.level,
			    SST_BF_INFO_0_OFFSET, SST_BF_T_PROHOT_START, SST_BF_T_PROHOT_WIDTH,
			    SST_MUL_FACTOR_NONE)
	_read_bf_level_info("BF-tdp", base_freq.thermal_design_power_w, base_freq.level,
			    SST_BF_INFO_0_OFFSET, SST_BF_TDP_START, SST_BF_TDP_WIDTH,
			    SST_MUL_FACTOR_NONE)
	base_freq.thermal_design_power_w /= 8; /* unit = 1/8th watt */

	if (copy_to_user(argp, &base_freq, sizeof(base_freq)))
		return -EFAULT;

	return 0;
}

#define P1_HI_CORE_MASK_START	0
#define P1_HI_CORE_MASK_WIDTH	64

static int isst_if_get_base_freq_mask(void __user *argp)
{
	static struct isst_perf_level_cpu_mask cpumask;
	struct tpmi_per_power_domain_info *power_domain_info;
	u64 mask;

	if (copy_from_user(&cpumask, argp, sizeof(cpumask)))
		return -EFAULT;

	power_domain_info = get_instance(cpumask.socket_id, cpumask.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	_read_bf_level_info("BF-cpumask", mask, cpumask.level, SST_BF_INFO_1_OFFSET,
			    P1_HI_CORE_MASK_START, P1_HI_CORE_MASK_WIDTH,
			    SST_MUL_FACTOR_NONE)

	cpumask.mask = mask;

	if (!cpumask.punit_cpu_map)
		return -EOPNOTSUPP;

	if (copy_to_user(argp, &cpumask, sizeof(cpumask)))
		return -EFAULT;

	return 0;
}

static int isst_if_get_tpmi_instance_count(void __user *argp)
{
	struct isst_tpmi_instance_count tpmi_inst;
	struct tpmi_sst_struct *sst_inst;
	int i;

	if (copy_from_user(&tpmi_inst, argp, sizeof(tpmi_inst)))
		return -EFAULT;

	if (tpmi_inst.socket_id >= topology_max_packages())
		return -EINVAL;

	sst_inst = isst_common.sst_inst[tpmi_inst.socket_id];

	tpmi_inst.count = isst_instance_count(sst_inst);

	tpmi_inst.valid_mask = 0;
	for (i = 0; i < tpmi_inst.count; i++) {
		struct tpmi_per_power_domain_info *pd_info;
		u8 part;
		int pd;

		pd = map_partition_power_domain_id(sst_inst, i, &part);
		if (pd < 0)
			continue;

		pd_info = &sst_inst->power_domain_info[part][pd];
		if (pd_info->sst_base)
			tpmi_inst.valid_mask |= BIT(i);
	}

	if (!tpmi_inst.valid_mask)
		tpmi_inst.count = 0;

	if (copy_to_user(argp, &tpmi_inst, sizeof(tpmi_inst)))
		return -EFAULT;

	return 0;
}

#define SST_TF_INFO_0_OFFSET	0
#define SST_TF_INFO_1_OFFSET	8
#define SST_TF_INFO_2_OFFSET	16

#define SST_TF_MAX_LP_CLIP_RATIOS	TRL_MAX_LEVELS

#define SST_TF_LP_CLIP_RATIO_0_START	16
#define SST_TF_LP_CLIP_RATIO_0_WIDTH	8

#define SST_TF_RATIO_0_START	0
#define SST_TF_RATIO_0_WIDTH	8

#define SST_TF_NUM_CORE_0_START 0
#define SST_TF_NUM_CORE_0_WIDTH 8

static int isst_if_get_turbo_freq_info(void __user *argp)
{
	static struct isst_turbo_freq_info turbo_freq;
	struct tpmi_per_power_domain_info *power_domain_info;
	int i, j;

	if (copy_from_user(&turbo_freq, argp, sizeof(turbo_freq)))
		return -EFAULT;

	power_domain_info = get_instance(turbo_freq.socket_id, turbo_freq.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (turbo_freq.level > power_domain_info->max_level)
		return -EINVAL;

	turbo_freq.max_buckets = TRL_MAX_BUCKETS;
	turbo_freq.max_trl_levels = TRL_MAX_LEVELS;
	turbo_freq.max_clip_freqs = SST_TF_MAX_LP_CLIP_RATIOS;

	for (i = 0; i < turbo_freq.max_clip_freqs; ++i)
		_read_tf_level_info("lp_clip*", turbo_freq.lp_clip_freq_mhz[i],
				    turbo_freq.level, SST_TF_INFO_0_OFFSET,
				    SST_TF_LP_CLIP_RATIO_0_START +
				    (i * SST_TF_LP_CLIP_RATIO_0_WIDTH),
				    SST_TF_LP_CLIP_RATIO_0_WIDTH, SST_MUL_FACTOR_FREQ)

	for (i = 0; i < TRL_MAX_LEVELS; ++i) {
		for (j = 0; j < TRL_MAX_BUCKETS; ++j)
			_read_tf_level_info("cydn*_bucket_*_trl",
					    turbo_freq.trl_freq_mhz[i][j], turbo_freq.level,
					    SST_TF_INFO_2_OFFSET + (i * SST_TF_RATIO_0_WIDTH),
					    j * SST_TF_RATIO_0_WIDTH, SST_TF_RATIO_0_WIDTH,
					    SST_MUL_FACTOR_FREQ)
	}

	for (i = 0; i < TRL_MAX_BUCKETS; ++i)
		_read_tf_level_info("bucket_*_core_count", turbo_freq.bucket_core_counts[i],
				    turbo_freq.level, SST_TF_INFO_1_OFFSET,
				    SST_TF_NUM_CORE_0_WIDTH * i, SST_TF_NUM_CORE_0_WIDTH,
				    SST_MUL_FACTOR_NONE)

	if (copy_to_user(argp, &turbo_freq, sizeof(turbo_freq)))
		return -EFAULT;

	return 0;
}

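/*
 * Single ioctl entry point for the TPMI SST interface: every command is
 * serialized by isst_tpmi_dev_lock, and unrecognized commands fall through
 * with -ENOTTY.
 */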
static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long ret = -ENOTTY;

	mutex_lock(&isst_tpmi_dev_lock);
	switch (cmd) {
	case ISST_IF_COUNT_TPMI_INSTANCES:
		ret = isst_if_get_tpmi_instance_count(argp);
		break;
	case ISST_IF_CORE_POWER_STATE:
		ret = isst_if_core_power_state(argp);
		break;
	case ISST_IF_CLOS_PARAM:
		ret = isst_if_clos_param(argp);
		break;
	case ISST_IF_CLOS_ASSOC:
		ret = isst_if_clos_assoc(argp);
		break;
	case ISST_IF_PERF_LEVELS:
		ret = isst_if_get_perf_level(argp);
		break;
	case ISST_IF_PERF_SET_LEVEL:
		ret = isst_if_set_perf_level(argp);
		break;
	case ISST_IF_PERF_SET_FEATURE:
		ret = isst_if_set_perf_feature(argp);
		break;
	case ISST_IF_GET_PERF_LEVEL_INFO:
		ret = isst_if_get_perf_level_info(argp);
		break;
	case ISST_IF_GET_PERF_LEVEL_CPU_MASK:
		ret = isst_if_get_perf_level_mask(argp);
		break;
	case ISST_IF_GET_BASE_FREQ_INFO:
		ret = isst_if_get_base_freq_info(argp);
		break;
	case ISST_IF_GET_BASE_FREQ_CPU_MASK:
		ret = isst_if_get_base_freq_mask(argp);
		break;
	case ISST_IF_GET_TURBO_FREQ_INFO:
		ret = isst_if_get_turbo_freq_info(argp);
		break;
	default:
		break;
	}
	mutex_unlock(&isst_tpmi_dev_lock);

	return ret;
}

#define TPMI_SST_AUTO_SUSPEND_DELAY_MS	2000

int tpmi_sst_dev_add(struct auxiliary_device *auxdev)
{
	struct tpmi_per_power_domain_info *pd_info;
	bool read_blocked = 0, write_blocked = 0;
	struct intel_tpmi_plat_info *plat_info;
	struct device *dev = &auxdev->dev;
	struct tpmi_sst_struct *tpmi_sst;
	u8 i, num_resources, io_die_cnt;
	int ret, pkg = 0, inst = 0;
	bool first_enum = false;
	u16 cdie_mask;
	u8 partition;

	ret = tpmi_get_feature_status(auxdev, TPMI_ID_SST, &read_blocked, &write_blocked);
	if (ret)
		dev_info(dev, "Can't read feature status: ignoring read/write blocked status\n");

	if (read_blocked) {
		dev_info(dev, "Firmware has blocked reads, exiting\n");
		return -ENODEV;
	}

	plat_info = tpmi_get_platform_data(auxdev);
	if (!plat_info) {
		dev_err(dev, "No platform info\n");
		return -EINVAL;
	}

	pkg = plat_info->package_id;
	if (pkg >= topology_max_packages()) {
		dev_err(dev, "Invalid package id :%x\n", pkg);
		return -EINVAL;
	}

	partition = plat_info->partition;
	if (partition >= SST_MAX_PARTITIONS) {
		dev_err(&auxdev->dev, "Invalid partition :%x\n", partition);
		return -EINVAL;
	}

	num_resources = tpmi_get_resource_count(auxdev);

	if (!num_resources)
		return -EINVAL;

	mutex_lock(&isst_tpmi_dev_lock);

	if (isst_common.sst_inst[pkg]) {
		tpmi_sst = isst_common.sst_inst[pkg];
	} else {
		/*
		 * A tpmi_sst instance is per package. So it needs to be
		 * allocated only once for both partitions. We can't use
		 * devm_* allocation here as each partition is a
		 * different device, which can be unbound.
		 */
		tpmi_sst = kzalloc(sizeof(*tpmi_sst), GFP_KERNEL);
		if (!tpmi_sst) {
			ret = -ENOMEM;
			goto unlock_exit;
		}
		first_enum = true;
	}

	ret = 0;

	pd_info = devm_kcalloc(dev, num_resources, sizeof(*pd_info), GFP_KERNEL);
	if (!pd_info) {
		ret = -ENOMEM;
		goto unlock_free;
	}

	/* Get the IO die count, if cdie_mask is present */
	if (plat_info->cdie_mask) {
		u8 cdie_range;

		cdie_mask = plat_info->cdie_mask;
		cdie_range = fls(cdie_mask) - ffs(cdie_mask) + 1;
		io_die_cnt = num_resources - cdie_range;
	} else {
		/*
		 * This is a synthetic mask, so be careful when assuming
		 * that these are compute dies only.
		 */
		cdie_mask = (1 << num_resources) - 1;
		io_die_cnt = 0;
	}

	for (i = 0; i < num_resources; ++i) {
		struct resource *res;

		res = tpmi_get_resource_at_index(auxdev, i);
		if (!res) {
			pd_info[i].sst_base = NULL;
			continue;
		}

		pd_info[i].package_id = pkg;
		pd_info[i].power_domain_id = i;
		pd_info[i].auxdev = auxdev;
		pd_info[i].write_blocked = write_blocked;
		pd_info[i].sst_base = devm_ioremap_resource(dev, res);
		if (IS_ERR(pd_info[i].sst_base)) {
			ret = PTR_ERR(pd_info[i].sst_base);
			goto unlock_free;
		}

		if (sst_main(auxdev, &pd_info[i])) {
			/*
			 * This entry is not valid; hardware can partially
			 * populate dies, in which case MMIO will read 0xFFs.
			 * It is also possible that some pre-production
			 * hardware has invalid data. But don't fail, and
			 * continue to use other dies with valid data.
			 */
			devm_iounmap(dev, pd_info[i].sst_base);
			pd_info[i].sst_base = NULL;
			continue;
		}

		++inst;
	}

	if (!inst) {
		ret = -ENODEV;
		goto unlock_free;
	}

	tpmi_sst->package_id = pkg;

	tpmi_sst->power_domain_info[partition] = pd_info;
	tpmi_sst->number_of_power_domains[partition] = num_resources;
	tpmi_sst->cdie_mask[partition] = cdie_mask;
	tpmi_sst->io_dies[partition] = io_die_cnt;
	tpmi_sst->partition_mask |= BIT(partition);
	tpmi_sst->partition_mask_current |= BIT(partition);

	auxiliary_set_drvdata(auxdev, tpmi_sst);

	if (isst_common.max_index < pkg)
		isst_common.max_index = pkg;
	isst_common.sst_inst[pkg] = tpmi_sst;

unlock_free:
	if (ret && first_enum)
		kfree(tpmi_sst);
unlock_exit:
	mutex_unlock(&isst_tpmi_dev_lock);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_add, "INTEL_TPMI_SST");

void tpmi_sst_dev_remove(struct auxiliary_device *auxdev)
{
	struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
	struct intel_tpmi_plat_info *plat_info;

	plat_info = tpmi_get_platform_data(auxdev);
	if (!plat_info)
		return;

	mutex_lock(&isst_tpmi_dev_lock);
	tpmi_sst->power_domain_info[plat_info->partition] = NULL;
	tpmi_sst->partition_mask_current &= ~BIT(plat_info->partition);
	/* Free the package instance when all partitions are removed */
	if (!tpmi_sst->partition_mask_current) {
		isst_common.sst_inst[tpmi_sst->package_id] = NULL;
		kfree(tpmi_sst);
	}
	mutex_unlock(&isst_tpmi_dev_lock);
}
EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_remove, "INTEL_TPMI_SST");

void tpmi_sst_dev_suspend(struct auxiliary_device *auxdev)
{
	struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
	struct tpmi_per_power_domain_info *power_domain_info;
	struct intel_tpmi_plat_info *plat_info;
	void __iomem *cp_base;

	plat_info = tpmi_get_platform_data(auxdev);
	if (!plat_info)
		return;

	power_domain_info = tpmi_sst->power_domain_info[plat_info->partition];

	cp_base = power_domain_info->sst_base + power_domain_info->sst_header.cp_offset;
	power_domain_info->saved_sst_cp_control = readq(cp_base + SST_CP_CONTROL_OFFSET);

	memcpy_fromio(power_domain_info->saved_clos_configs, cp_base + SST_CLOS_CONFIG_0_OFFSET,
		      sizeof(power_domain_info->saved_clos_configs));

	memcpy_fromio(power_domain_info->saved_clos_assocs, cp_base + SST_CLOS_ASSOC_0_OFFSET,
		      sizeof(power_domain_info->saved_clos_assocs));

	power_domain_info->saved_pp_control = readq(power_domain_info->sst_base +
						    power_domain_info->sst_header.pp_offset +
						    SST_PP_CONTROL_OFFSET);
}
EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_suspend, "INTEL_TPMI_SST");

void tpmi_sst_dev_resume(struct auxiliary_device *auxdev)
{
	struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
	struct tpmi_per_power_domain_info *power_domain_info;
	struct intel_tpmi_plat_info *plat_info;
	void __iomem *cp_base;

	plat_info = tpmi_get_platform_data(auxdev);
	if (!plat_info)
		return;

	power_domain_info = tpmi_sst->power_domain_info[plat_info->partition];

	cp_base = power_domain_info->sst_base + power_domain_info->sst_header.cp_offset;
	writeq(power_domain_info->saved_sst_cp_control, cp_base + SST_CP_CONTROL_OFFSET);

	memcpy_toio(cp_base + SST_CLOS_CONFIG_0_OFFSET, power_domain_info->saved_clos_configs,
		    sizeof(power_domain_info->saved_clos_configs));

	memcpy_toio(cp_base + SST_CLOS_ASSOC_0_OFFSET, power_domain_info->saved_clos_assocs,
		    sizeof(power_domain_info->saved_clos_assocs));

	writeq(power_domain_info->saved_pp_control, power_domain_info->sst_base +
				power_domain_info->sst_header.pp_offset + SST_PP_CONTROL_OFFSET);
}
EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_resume, "INTEL_TPMI_SST");

#define ISST_TPMI_API_VERSION	0x03

int tpmi_sst_init(void)
{
	struct isst_if_cmd_cb cb;
	int ret = 0;

	mutex_lock(&isst_tpmi_dev_lock);

	if (isst_core_usage_count) {
		++isst_core_usage_count;
		goto init_done;
	}

	isst_common.sst_inst = kcalloc(topology_max_packages(),
				       sizeof(*isst_common.sst_inst),
				       GFP_KERNEL);
	if (!isst_common.sst_inst) {
		ret = -ENOMEM;
		goto init_done;
	}

	memset(&cb, 0, sizeof(cb));
	cb.cmd_size = sizeof(struct isst_if_io_reg);
	cb.offset = offsetof(struct isst_if_io_regs, io_reg);
	cb.cmd_callback = NULL;
	cb.api_version = ISST_TPMI_API_VERSION;
	cb.def_ioctl = isst_if_def_ioctl;
	cb.owner = THIS_MODULE;
	ret = isst_if_cdev_register(ISST_IF_DEV_TPMI, &cb);
	if (ret)
		kfree(isst_common.sst_inst);
	else
		++isst_core_usage_count;
init_done:
	mutex_unlock(&isst_tpmi_dev_lock);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(tpmi_sst_init, "INTEL_TPMI_SST");

void tpmi_sst_exit(void)
{
	mutex_lock(&isst_tpmi_dev_lock);
	if (isst_core_usage_count)
		--isst_core_usage_count;

	if (!isst_core_usage_count) {
		isst_if_cdev_unregister(ISST_IF_DEV_TPMI);
		kfree(isst_common.sst_inst);
	}
	mutex_unlock(&isst_tpmi_dev_lock);
}
EXPORT_SYMBOL_NS_GPL(tpmi_sst_exit, "INTEL_TPMI_SST");

MODULE_IMPORT_NS("INTEL_TPMI");
MODULE_IMPORT_NS("INTEL_TPMI_POWER_DOMAIN");

MODULE_DESCRIPTION("ISST TPMI interface module");
MODULE_LICENSE("GPL");