xref: /linux/drivers/platform/x86/intel/speed_select_if/isst_if_common.c (revision 32a92f8c89326985e05dce8b22d3f0aa07a3e1bd)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Intel Speed Select Interface: Common functions
4  * Copyright (c) 2019, Intel Corporation.
5  * All rights reserved.
6  *
7  * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
8  */
9 
#include <linux/cpufeature.h>
#include <linux/cpuhotplug.h>
#include <linux/fs.h>
#include <linux/hashtable.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <uapi/linux/isst_if.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/msr.h>

#include "isst_if_common.h"
27 
28 #define MSR_THREAD_ID_INFO	0x53
29 #define MSR_PM_LOGICAL_ID	0x54
30 #define MSR_CPU_BUS_NUMBER	0x128
31 
/* Per-device-type callback slots, filled by isst_if_cdev_register() */
static struct isst_if_cmd_cb punit_callbacks[ISST_IF_DEV_MAX];

/* MSRs user space may access through ISST_IF_MSR_COMMAND */
static int punit_msr_white_list[] = {
	MSR_TURBO_RATIO_LIMIT,
	MSR_CONFIG_TDP_CONTROL,
	MSR_TURBO_RATIO_LIMIT1,
	MSR_TURBO_RATIO_LIMIT2,
	MSR_PM_LOGICAL_ID,
};
41 
/* Inclusive range [sub_cmd_beg, sub_cmd_end] of valid sub-commands for @cmd */
struct isst_valid_cmd_ranges {
	u16 cmd;
	u16 sub_cmd_beg;
	u16 sub_cmd_end;
};

/* A (cmd, sub_cmd, param) triple identifying a "set" (state-changing) request */
struct isst_cmd_set_req_type {
	u16 cmd;
	u16 sub_cmd;
	u16 param;
};
53 
/* Whitelist of mailbox command/sub-command ranges user space may issue */
static const struct isst_valid_cmd_ranges isst_valid_cmds[] = {
	{0xD0, 0x00, 0x03},
	{0x7F, 0x00, 0x0C},
	{0x7F, 0x10, 0x12},
	{0x7F, 0x20, 0x23},
	{0x94, 0x03, 0x03},
	{0x95, 0x03, 0x03},
};

/* Mailbox requests that change state; stored so they can be replayed on resume */
static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = {
	{0xD0, 0x00, 0x08},
	{0xD0, 0x01, 0x08},
	{0xD0, 0x02, 0x08},
	{0xD0, 0x03, 0x08},
	{0x7F, 0x02, 0x00},
	{0x7F, 0x08, 0x00},
	{0x95, 0x03, 0x03},
};
72 
/*
 * One stored command for replay on resume.  @cmd holds either a packed
 * mailbox value (command << 16 | sub_command) or an MSR index, selected
 * by @mbox_cmd_type (non-zero means mailbox).
 */
struct isst_cmd {
	struct hlist_node hnode;
	u64 data;		/* Mailbox request data or MSR value */
	u32 cmd;		/* Packed mailbox cmd/sub-cmd or MSR index */
	int cpu;		/* Target logical CPU */
	int mbox_cmd_type;	/* Non-zero: mailbox command; zero: MSR write */
	u32 param;		/* Mailbox parameter (unused for MSR) */
};

/* Set when the matched CPU id entry carries SST_HPM_SUPPORTED */
static bool isst_hpm_support;

/* Stored commands for resume replay, keyed by isst_cmd.cmd; guarded by lock */
static DECLARE_HASHTABLE(isst_hash, 8);
static DEFINE_MUTEX(isst_hash_lock);
86 
/* Allocate and insert a new replay entry; caller holds isst_hash_lock */
static int isst_store_new_cmd(int cmd, u32 cpu, int mbox_cmd_type, u32 param,
			      u64 data)
{
	struct isst_cmd *entry;

	entry = kmalloc_obj(*entry);
	if (!entry)
		return -ENOMEM;

	entry->cmd = cmd;
	entry->cpu = cpu;
	entry->mbox_cmd_type = mbox_cmd_type;
	entry->param = param;
	entry->data = data;

	hash_add(isst_hash, &entry->hnode, entry->cmd);

	return 0;
}
106 
isst_delete_hash(void)107 static void isst_delete_hash(void)
108 {
109 	struct isst_cmd *sst_cmd;
110 	struct hlist_node *tmp;
111 	int i;
112 
113 	hash_for_each_safe(isst_hash, i, tmp, sst_cmd, hnode) {
114 		hash_del(&sst_cmd->hnode);
115 		kfree(sst_cmd);
116 	}
117 }
118 
119 /**
120  * isst_store_cmd() - Store command to a hash table
121  * @cmd: Mailbox command.
122  * @sub_cmd: Mailbox sub-command or MSR id.
123  * @cpu: Target CPU for the command
124  * @mbox_cmd_type: Mailbox or MSR command.
125  * @param: Mailbox parameter.
126  * @data: Mailbox request data or MSR data.
127  *
128  * Stores the command to a hash table if there is no such command already
129  * stored. If already stored update the latest parameter and data for the
130  * command.
131  *
132  * Return: Return result of store to hash table, 0 for success, others for
133  * failure.
134  */
isst_store_cmd(int cmd,int sub_cmd,u32 cpu,int mbox_cmd_type,u32 param,u64 data)135 int isst_store_cmd(int cmd, int sub_cmd, u32 cpu, int mbox_cmd_type,
136 		   u32 param, u64 data)
137 {
138 	struct isst_cmd *sst_cmd;
139 	int full_cmd, ret;
140 
141 	full_cmd = (cmd & GENMASK_ULL(15, 0)) << 16;
142 	full_cmd |= (sub_cmd & GENMASK_ULL(15, 0));
143 	mutex_lock(&isst_hash_lock);
144 	hash_for_each_possible(isst_hash, sst_cmd, hnode, full_cmd) {
145 		if (sst_cmd->cmd == full_cmd && sst_cmd->cpu == cpu &&
146 		    sst_cmd->mbox_cmd_type == mbox_cmd_type) {
147 			sst_cmd->param = param;
148 			sst_cmd->data = data;
149 			mutex_unlock(&isst_hash_lock);
150 			return 0;
151 		}
152 	}
153 
154 	ret = isst_store_new_cmd(full_cmd, cpu, mbox_cmd_type, param, data);
155 	mutex_unlock(&isst_hash_lock);
156 
157 	return ret;
158 }
159 EXPORT_SYMBOL_GPL(isst_store_cmd);
160 
isst_mbox_resume_command(struct isst_if_cmd_cb * cb,struct isst_cmd * sst_cmd)161 static void isst_mbox_resume_command(struct isst_if_cmd_cb *cb,
162 				     struct isst_cmd *sst_cmd)
163 {
164 	struct isst_if_mbox_cmd mbox_cmd;
165 	int wr_only;
166 
167 	mbox_cmd.command = (sst_cmd->cmd & GENMASK_ULL(31, 16)) >> 16;
168 	mbox_cmd.sub_command = sst_cmd->cmd & GENMASK_ULL(15, 0);
169 	mbox_cmd.parameter = sst_cmd->param;
170 	mbox_cmd.req_data = sst_cmd->data;
171 	mbox_cmd.logical_cpu = sst_cmd->cpu;
172 	(cb->cmd_callback)((u8 *)&mbox_cmd, &wr_only, 1);
173 }
174 
175 /**
176  * isst_resume_common() - Process Resume request
177  *
178  * On resume replay all mailbox commands and MSRs.
179  *
180  * Return: None.
181  */
isst_resume_common(void)182 void isst_resume_common(void)
183 {
184 	struct isst_cmd *sst_cmd;
185 	int i;
186 
187 	hash_for_each(isst_hash, i, sst_cmd, hnode) {
188 		struct isst_if_cmd_cb *cb;
189 
190 		if (sst_cmd->mbox_cmd_type) {
191 			cb = &punit_callbacks[ISST_IF_DEV_MBOX];
192 			if (cb->registered)
193 				isst_mbox_resume_command(cb, sst_cmd);
194 		} else {
195 			wrmsrq_safe_on_cpu(sst_cmd->cpu, sst_cmd->cmd,
196 					   sst_cmd->data);
197 		}
198 	}
199 }
200 EXPORT_SYMBOL_GPL(isst_resume_common);
201 
202 /**
203  * isst_if_mbox_cmd_invalid() - Check invalid mailbox commands
204  * @cmd: Pointer to the command structure to verify.
205  *
206  * Invalid command to PUNIT to may result in instability of the platform.
207  * This function has a whitelist of commands, which are allowed.
208  *
209  * Return: Return true if the command is invalid, else false.
210  */
isst_if_mbox_cmd_invalid(struct isst_if_mbox_cmd * cmd)211 bool isst_if_mbox_cmd_invalid(struct isst_if_mbox_cmd *cmd)
212 {
213 	int i;
214 
215 	if (cmd->logical_cpu >= nr_cpu_ids)
216 		return true;
217 
218 	for (i = 0; i < ARRAY_SIZE(isst_valid_cmds); ++i) {
219 		if (cmd->command == isst_valid_cmds[i].cmd &&
220 		    (cmd->sub_command >= isst_valid_cmds[i].sub_cmd_beg &&
221 		     cmd->sub_command <= isst_valid_cmds[i].sub_cmd_end)) {
222 			return false;
223 		}
224 	}
225 
226 	return true;
227 }
228 EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_invalid);
229 
230 /**
231  * isst_if_mbox_cmd_set_req() - Check mailbox command is a set request
232  * @cmd: Pointer to the command structure to verify.
233  *
234  * Check if the given mail box level is set request and not a get request.
235  *
236  * Return: Return true if the command is set_req, else false.
237  */
isst_if_mbox_cmd_set_req(struct isst_if_mbox_cmd * cmd)238 bool isst_if_mbox_cmd_set_req(struct isst_if_mbox_cmd *cmd)
239 {
240 	int i;
241 
242 	for (i = 0; i < ARRAY_SIZE(isst_cmd_set_reqs); ++i) {
243 		if (cmd->command == isst_cmd_set_reqs[i].cmd &&
244 		    cmd->sub_command == isst_cmd_set_reqs[i].sub_cmd &&
245 		    cmd->parameter == isst_cmd_set_reqs[i].param) {
246 			return true;
247 		}
248 	}
249 
250 	return false;
251 }
252 EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_set_req);
253 
254 static int isst_if_api_version;
255 
isst_if_get_platform_info(void __user * argp)256 static int isst_if_get_platform_info(void __user *argp)
257 {
258 	struct isst_if_platform_info info;
259 
260 	info.api_version = isst_if_api_version;
261 	info.driver_version = ISST_IF_DRIVER_VERSION;
262 	info.max_cmds_per_ioctl = ISST_IF_CMD_LIMIT;
263 	info.mbox_supported = punit_callbacks[ISST_IF_DEV_MBOX].registered;
264 	info.mmio_supported = punit_callbacks[ISST_IF_DEV_MMIO].registered;
265 
266 	if (copy_to_user(argp, &info, sizeof(info)))
267 		return -EFAULT;
268 
269 	return 0;
270 }
271 
/* Only buses 0 and 1 carry the PUNIT PCI functions this driver uses */
#define ISST_MAX_BUS_NUMBER	2

struct isst_if_cpu_info {
	/* For BUS 0 and BUS 1 only, which we need for PUNIT interface */
	int bus_info[ISST_MAX_BUS_NUMBER];	/* -1 when MSR_CPU_BUS_NUMBER read failed */
	struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];	/* cached at CPU online time */
	int punit_cpu_id;	/* PUNIT logical CPU id; -1 if unavailable */
	int numa_node;
};

/* Per-package cache of matched PCI devices, shared by all CPUs in the package */
struct isst_if_pkg_info {
	struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];
};

/* Indexed by logical CPU / logical package id; allocated in isst_if_cpu_info_init() */
static struct isst_if_cpu_info *isst_cpu_info;
static struct isst_if_pkg_info *isst_pkg_info;
288 
/*
 * Scan all PCI devices for one on @cpu's cached bus @bus_no with the given
 * @dev/@fn, preferring a device whose NUMA node matches the CPU's.  A NUMA
 * match is also cached per package for later fallback use.
 */
static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
{
	struct pci_dev *matched_pci_dev = NULL;
	struct pci_dev *pci_dev = NULL;
	struct pci_dev *_pci_dev = NULL;
	int no_matches = 0, pkg_id;
	int bus_number;

	/* Validate indices before touching isst_cpu_info[] */
	if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
	    cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
		return NULL;

	pkg_id = topology_logical_package_id(cpu);
	if (pkg_id >= topology_max_packages())
		return NULL;

	/* Bus number was cached from MSR_CPU_BUS_NUMBER when the CPU came online */
	bus_number = isst_cpu_info[cpu].bus_info[bus_no];
	if (bus_number < 0)
		return NULL;

	for_each_pci_dev(_pci_dev) {
		int node;

		if (_pci_dev->bus->number != bus_number ||
		    _pci_dev->devfn != PCI_DEVFN(dev, fn))
			continue;

		/* Remember the first bus/devfn match in case NUMA can't decide */
		++no_matches;
		if (!matched_pci_dev)
			matched_pci_dev = _pci_dev;

		node = dev_to_node(&_pci_dev->dev);
		if (node == NUMA_NO_NODE) {
			pr_info_once("Fail to get numa node for CPU:%d bus:%d dev:%d fn:%d\n",
				     cpu, bus_no, dev, fn);
			continue;
		}

		if (node == isst_cpu_info[cpu].numa_node) {
			/* Cache per package so any CPU in the package can reuse it */
			isst_pkg_info[pkg_id].pci_dev[bus_no] = _pci_dev;

			pci_dev = _pci_dev;
			break;
		}
	}

	/*
	 * If there is no numa matched pci_dev, then there can be following cases:
	 * 1. CONFIG_NUMA is not defined: In this case if there is only single device
	 *    match, then we don't need numa information. Simply return last match.
	 *    Otherwise return NULL.
	 * 2. NUMA information is not exposed via _SEG method. In this case it is similar
	 *    to case 1.
	 * 3. Numa information doesn't match with CPU numa node and more than one match
	 *    return NULL.
	 */
	if (!pci_dev && no_matches == 1)
		pci_dev = matched_pci_dev;

	/* Return pci_dev pointer for any matched CPU in the package */
	if (!pci_dev)
		pci_dev = isst_pkg_info[pkg_id].pci_dev[bus_no];

	return pci_dev;
}
354 
355 /**
356  * isst_if_get_pci_dev() - Get the PCI device instance for a CPU
357  * @cpu: Logical CPU number.
358  * @bus_no: The bus number assigned by the hardware.
359  * @dev: The device number assigned by the hardware.
360  * @fn: The function number assigned by the hardware.
361  *
362  * Using cached bus information, find out the PCI device for a bus number,
363  * device and function.
364  *
365  * Return: Return pci_dev pointer or NULL.
366  */
isst_if_get_pci_dev(int cpu,int bus_no,int dev,int fn)367 struct pci_dev *isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
368 {
369 	struct pci_dev *pci_dev;
370 
371 	if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER  || cpu < 0 ||
372 	    cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
373 		return NULL;
374 
375 	pci_dev = isst_cpu_info[cpu].pci_dev[bus_no];
376 
377 	if (pci_dev && pci_dev->devfn == PCI_DEVFN(dev, fn))
378 		return pci_dev;
379 
380 	return _isst_if_get_pci_dev(cpu, bus_no, dev, fn);
381 }
382 EXPORT_SYMBOL_GPL(isst_if_get_pci_dev);
383 
/*
 * CPU hotplug "online" callback: cache the CPU's NUMA node, the PUNIT bus
 * numbers from MSR_CPU_BUS_NUMBER and the PUNIT CPU id for later use.
 */
static int isst_if_cpu_online(unsigned int cpu)
{
	u64 data;
	int ret;

	isst_cpu_info[cpu].numa_node = cpu_to_node(cpu);

	ret = rdmsrq_safe(MSR_CPU_BUS_NUMBER, &data);
	if (ret) {
		/* This is not a fatal error on MSR mailbox only I/F */
		isst_cpu_info[cpu].bus_info[0] = -1;
		isst_cpu_info[cpu].bus_info[1] = -1;
	} else {
		/* Bus 0 is in bits 7:0, bus 1 in bits 15:8 */
		isst_cpu_info[cpu].bus_info[0] = data & 0xff;
		isst_cpu_info[cpu].bus_info[1] = (data >> 8) & 0xff;
		isst_cpu_info[cpu].pci_dev[0] = _isst_if_get_pci_dev(cpu, 0, 0, 1);
		isst_cpu_info[cpu].pci_dev[1] = _isst_if_get_pci_dev(cpu, 1, 30, 1);
	}

	if (isst_hpm_support) {

		/* On HPM-capable parts prefer MSR_PM_LOGICAL_ID for the PUNIT id */
		ret = rdmsrq_safe(MSR_PM_LOGICAL_ID, &data);
		if (!ret)
			goto set_punit_id;
	}

	/* Fall back to the legacy thread-id info MSR */
	ret = rdmsrq_safe(MSR_THREAD_ID_INFO, &data);
	if (ret) {
		isst_cpu_info[cpu].punit_cpu_id = -1;
		return ret;
	}

set_punit_id:
	isst_cpu_info[cpu].punit_cpu_id = data;

	return 0;
}
421 
422 static int isst_if_online_id;
423 
isst_if_cpu_info_init(void)424 static int isst_if_cpu_info_init(void)
425 {
426 	int ret;
427 
428 	isst_cpu_info = kzalloc_objs(*isst_cpu_info, num_possible_cpus());
429 	if (!isst_cpu_info)
430 		return -ENOMEM;
431 
432 	isst_pkg_info = kzalloc_objs(*isst_pkg_info, topology_max_packages());
433 	if (!isst_pkg_info) {
434 		kfree(isst_cpu_info);
435 		return -ENOMEM;
436 	}
437 
438 	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
439 				"platform/x86/isst-if:online",
440 				isst_if_cpu_online, NULL);
441 	if (ret < 0) {
442 		kfree(isst_pkg_info);
443 		kfree(isst_cpu_info);
444 		return ret;
445 	}
446 
447 	isst_if_online_id = ret;
448 
449 	return 0;
450 }
451 
isst_if_cpu_info_exit(void)452 static void isst_if_cpu_info_exit(void)
453 {
454 	cpuhp_remove_state(isst_if_online_id);
455 	kfree(isst_pkg_info);
456 	kfree(isst_cpu_info);
457 };
458 
/* ISST_IF_GET_PHY_ID handler: map a logical CPU to its PUNIT CPU id */
static long isst_if_proc_phyid_req(u8 *cmd_ptr, int *write_only, int resume)
{
	struct isst_if_cpu_map *map = (struct isst_if_cpu_map *)cmd_ptr;

	if (map->logical_cpu >= nr_cpu_ids ||
	    map->logical_cpu >= num_possible_cpus())
		return -EINVAL;

	map->physical_cpu = isst_cpu_info[map->logical_cpu].punit_cpu_id;
	*write_only = 0;

	return 0;
}
473 
match_punit_msr_white_list(int msr)474 static bool match_punit_msr_white_list(int msr)
475 {
476 	int i;
477 
478 	for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
479 		if (punit_msr_white_list[i] == msr)
480 			return true;
481 	}
482 
483 	return false;
484 }
485 
/*
 * ISST_IF_MSR_COMMAND handler: read or write one whitelisted MSR on the
 * requested logical CPU.  Writes require CAP_SYS_ADMIN and (outside of
 * resume replay) are stored for replay after suspend.
 */
static long isst_if_msr_cmd_req(u8 *cmd_ptr, int *write_only, int resume)
{
	struct isst_if_msr_cmd *msr_cmd;
	int ret;

	msr_cmd = (struct isst_if_msr_cmd *)cmd_ptr;

	/* Only whitelisted MSRs may be touched from user space */
	if (!match_punit_msr_white_list(msr_cmd->msr))
		return -EINVAL;

	if (msr_cmd->logical_cpu >= nr_cpu_ids)
		return -EINVAL;

	if (msr_cmd->read_write) {
		/* Write path: privileged operation */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		ret = wrmsrq_safe_on_cpu(msr_cmd->logical_cpu,
					 msr_cmd->msr,
					 msr_cmd->data);
		*write_only = 1;
		/* Store successful writes (cmd 0 == MSR type) for resume replay */
		if (!ret && !resume)
			ret = isst_store_cmd(0, msr_cmd->msr,
					     msr_cmd->logical_cpu,
					     0, 0, msr_cmd->data);
	} else {
		u64 data;

		/* Read path: result is copied back to user space by the caller */
		ret = rdmsrq_safe_on_cpu(msr_cmd->logical_cpu,
					 msr_cmd->msr, &data);
		if (!ret) {
			msr_cmd->data = data;
			*write_only = 0;
		}
	}


	return ret;
}
525 
/*
 * Run a batched ioctl request: the user buffer begins with a u32 command
 * count, followed (at cb->offset) by that many fixed-size commands, each
 * handed to cb->cmd_callback.
 *
 * Returns the number of commands processed if at least one succeeded
 * (processing stops at the first failure); otherwise the error code from
 * the first command.
 */
static long isst_if_exec_multi_cmd(void __user *argp, struct isst_if_cmd_cb *cb)
{
	unsigned char __user *ptr;
	u32 cmd_count;
	u8 *cmd_ptr;
	long ret;
	int i;

	/* Each multi command has u32 command count as the first field */
	if (copy_from_user(&cmd_count, argp, sizeof(cmd_count)))
		return -EFAULT;

	if (!cmd_count || cmd_count > ISST_IF_CMD_LIMIT)
		return -EINVAL;

	/* One scratch buffer, reused for every command in the batch */
	cmd_ptr = kmalloc(cb->cmd_size, GFP_KERNEL);
	if (!cmd_ptr)
		return -ENOMEM;

	/* cb->offset points to start of the command after the command count */
	ptr = argp + cb->offset;

	for (i = 0; i < cmd_count; ++i) {
		int wr_only;

		/* Let the user interrupt a long batch */
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (copy_from_user(cmd_ptr, ptr, cb->cmd_size)) {
			ret = -EFAULT;
			break;
		}

		ret = cb->cmd_callback(cmd_ptr, &wr_only, 0);
		if (ret)
			break;

		/* Copy the result back unless the command was write-only */
		if (!wr_only && copy_to_user(ptr, cmd_ptr, cb->cmd_size)) {
			ret = -EFAULT;
			break;
		}

		ptr += cb->cmd_size;
	}

	kfree(cmd_ptr);

	return i ? i : ret;
}
577 
isst_if_def_ioctl(struct file * file,unsigned int cmd,unsigned long arg)578 static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
579 			      unsigned long arg)
580 {
581 	void __user *argp = (void __user *)arg;
582 	struct isst_if_cmd_cb cmd_cb;
583 	struct isst_if_cmd_cb *cb;
584 	long ret = -ENOTTY;
585 	int i;
586 
587 	switch (cmd) {
588 	case ISST_IF_GET_PLATFORM_INFO:
589 		ret = isst_if_get_platform_info(argp);
590 		break;
591 	case ISST_IF_GET_PHY_ID:
592 		cmd_cb.cmd_size = sizeof(struct isst_if_cpu_map);
593 		cmd_cb.offset = offsetof(struct isst_if_cpu_maps, cpu_map);
594 		cmd_cb.cmd_callback = isst_if_proc_phyid_req;
595 		ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
596 		break;
597 	case ISST_IF_IO_CMD:
598 		cb = &punit_callbacks[ISST_IF_DEV_MMIO];
599 		if (cb->registered)
600 			ret = isst_if_exec_multi_cmd(argp, cb);
601 		break;
602 	case ISST_IF_MBOX_COMMAND:
603 		cb = &punit_callbacks[ISST_IF_DEV_MBOX];
604 		if (cb->registered)
605 			ret = isst_if_exec_multi_cmd(argp, cb);
606 		break;
607 	case ISST_IF_MSR_COMMAND:
608 		cmd_cb.cmd_size = sizeof(struct isst_if_msr_cmd);
609 		cmd_cb.offset = offsetof(struct isst_if_msr_cmds, msr_cmd);
610 		cmd_cb.cmd_callback = isst_if_msr_cmd_req;
611 		ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
612 		break;
613 	default:
614 		for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
615 			struct isst_if_cmd_cb *cb = &punit_callbacks[i];
616 			int ret;
617 
618 			if (cb->def_ioctl) {
619 				ret = cb->def_ioctl(file, cmd, arg);
620 				if (!ret)
621 					return ret;
622 			}
623 		}
624 		break;
625 	}
626 
627 	return ret;
628 }
629 
/* Lock to prevent module registration when already opened by user space */
static DEFINE_MUTEX(punit_misc_dev_open_lock);
/* Number of outstanding opens of the misc device */
static int misc_device_open;

/*
 * Device open: take a module reference on every registered callback
 * provider so none can unload while user space holds the device open.
 * On failure, drop the references already taken.
 */
static int isst_if_open(struct inode *inode, struct file *file)
{
	int i, ret = 0;

	/* Fail open, if a module is going away */
	mutex_lock(&punit_misc_dev_open_lock);
	for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
		struct isst_if_cmd_cb *cb = &punit_callbacks[i];

		if (cb->registered && !try_module_get(cb->owner)) {
			ret = -ENODEV;
			break;
		}
	}
	if (ret) {
		int j;

		/* Roll back the references taken before the failure */
		for (j = 0; j < i; ++j) {
			struct isst_if_cmd_cb *cb;

			cb = &punit_callbacks[j];
			if (cb->registered)
				module_put(cb->owner);
		}
	} else {
		misc_device_open++;
	}
	mutex_unlock(&punit_misc_dev_open_lock);

	return ret;
}
665 
isst_if_relase(struct inode * inode,struct file * f)666 static int isst_if_relase(struct inode *inode, struct file *f)
667 {
668 	int i;
669 
670 	mutex_lock(&punit_misc_dev_open_lock);
671 	misc_device_open--;
672 	for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
673 		struct isst_if_cmd_cb *cb = &punit_callbacks[i];
674 
675 		if (cb->registered)
676 			module_put(cb->owner);
677 	}
678 	mutex_unlock(&punit_misc_dev_open_lock);
679 
680 	return 0;
681 }
682 
/* File operations for the ISST character interface */
static const struct file_operations isst_if_char_driver_ops = {
	.open = isst_if_open,
	.unlocked_ioctl = isst_if_def_ioctl,
	.release = isst_if_relase,
};

/* Misc device (dynamic minor) named "isst_interface" */
static struct miscdevice isst_if_char_driver = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "isst_interface",
	.fops		= &isst_if_char_driver_ops,
};
694 
isst_misc_reg(void)695 static int isst_misc_reg(void)
696 {
697 	int ret;
698 
699 	ret = isst_if_cpu_info_init();
700 	if (ret)
701 		return ret;
702 
703 	ret = misc_register(&isst_if_char_driver);
704 	if (ret)
705 		isst_if_cpu_info_exit();
706 
707 	return ret;
708 }
709 
/* Tear down the misc device and the per-CPU/per-package bookkeeping */
static void isst_misc_unreg(void)
{
	misc_deregister(&isst_if_char_driver);
	isst_if_cpu_info_exit();
}
715 
/**
 * isst_if_cdev_register() - Register callback for IOCTL
 * @device_type: The device type this callback handling.
 * @cb:	Callback structure.
 *
 * This function registers a callback to device type. On very first call
 * it will register a misc device, which is used for user kernel interface.
 * Other calls simply increment ref count. Registry will fail, if the user
 * already opened misc device for operation. Also if the misc device
 * creation failed, then it will not try again and all callers will get
 * failure code.
 *
 * Return: Return the return value from the misc creation device or -EINVAL
 * for unsupported device type.
 */
int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb)
{
	if (device_type >= ISST_IF_DEV_MAX)
		return -EINVAL;

	/* Legacy (pre-TPMI) device types are refused on HPM-capable parts */
	if (device_type < ISST_IF_DEV_TPMI && isst_hpm_support)
		return -ENODEV;

	mutex_lock(&punit_misc_dev_open_lock);
	/* Device is already open, we don't want to add new callbacks */
	if (misc_device_open) {
		mutex_unlock(&punit_misc_dev_open_lock);
		return -EAGAIN;
	}
	/* Track the highest API version across all registered callbacks */
	if (!cb->api_version)
		cb->api_version = ISST_IF_API_VERSION;
	if (cb->api_version > isst_if_api_version)
		isst_if_api_version = cb->api_version;
	memcpy(&punit_callbacks[device_type], cb, sizeof(*cb));
	punit_callbacks[device_type].registered = 1;
	mutex_unlock(&punit_misc_dev_open_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(isst_if_cdev_register);
756 
/**
 * isst_if_cdev_unregister() - Unregister callback for IOCTL
 * @device_type: The device type to unregister.
 *
 * This function unregisters the previously registered callback. If this
 * is the last callback unregistering, then misc device is removed.
 *
 * Return: None.
 */
void isst_if_cdev_unregister(int device_type)
{
	mutex_lock(&punit_misc_dev_open_lock);
	punit_callbacks[device_type].def_ioctl = NULL;
	punit_callbacks[device_type].registered = 0;
	/* Stored mailbox replay entries are useless without the mbox driver */
	if (device_type == ISST_IF_DEV_MBOX)
		isst_delete_hash();
	mutex_unlock(&punit_misc_dev_open_lock);
}
EXPORT_SYMBOL_GPL(isst_if_cdev_unregister);
776 
/* driver_data flags for isst_cpu_ids entries */
#define SST_HPM_SUPPORTED	0x01
#define SST_MBOX_SUPPORTED	0x02

/* Supported CPU models and the interface flavor each provides */
static const struct x86_cpu_id isst_cpu_ids[] = {
	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT,	SST_HPM_SUPPORTED),
	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X,	SST_HPM_SUPPORTED),
	X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X,	SST_HPM_SUPPORTED),
	X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X,	0),
	X86_MATCH_VFM(INTEL_GRANITERAPIDS_D,	SST_HPM_SUPPORTED),
	X86_MATCH_VFM(INTEL_GRANITERAPIDS_X,	SST_HPM_SUPPORTED),
	X86_MATCH_VFM(INTEL_ICELAKE_D,		0),
	X86_MATCH_VFM(INTEL_ICELAKE_X,		0),
	X86_MATCH_VFM(INTEL_DIAMONDRAPIDS_X,	SST_HPM_SUPPORTED),
	X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X,	0),
	X86_MATCH_VFM(INTEL_SKYLAKE_X,		SST_MBOX_SUPPORTED),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, isst_cpu_ids);
795 
isst_if_common_init(void)796 static int __init isst_if_common_init(void)
797 {
798 	const struct x86_cpu_id *id;
799 
800 	id = x86_match_cpu(isst_cpu_ids);
801 	if (!id)
802 		return -ENODEV;
803 
804 	if (id->driver_data == SST_HPM_SUPPORTED) {
805 		isst_hpm_support = true;
806 	} else if (id->driver_data == SST_MBOX_SUPPORTED) {
807 		u64 data;
808 
809 		/* Can fail only on some Skylake-X generations */
810 		if (rdmsrq_safe(MSR_OS_MAILBOX_INTERFACE, &data) ||
811 		    rdmsrq_safe(MSR_OS_MAILBOX_DATA, &data))
812 			return -ENODEV;
813 	}
814 
815 	return isst_misc_reg();
816 }
module_init(isst_if_common_init)817 module_init(isst_if_common_init)
818 
/* Module exit: unregister the misc device and free cached CPU info */
static void __exit isst_if_common_exit(void)
{
	isst_misc_unreg();
}
module_exit(isst_if_common_exit)
824 
825 MODULE_DESCRIPTION("ISST common interface module");
826 MODULE_LICENSE("GPL v2");
827