xref: /linux/arch/powerpc/platforms/powernv/opal-prd.c (revision fd7d598270724cc787982ea48bbe17ad383a8b7f)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OPAL Runtime Diagnostics interface driver
 * Supported on the POWERNV platform
 *
 * Copyright IBM Corporation 2015
 */

#define pr_fmt(fmt) "opal-prd: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/poll.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/opal-prd.h>
#include <asm/opal.h>
#include <asm/io.h>
#include <linux/uaccess.h>

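/*
 * A firmware message, viewed either through its parsed header or as raw
 * bytes: the flexible u8 array aliases the header, so a whole message
 * (header plus payload) can be copied in and out without casting.
 */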
struct opal_prd_msg {
	union {
		struct opal_prd_msg_header header;
		DECLARE_FLEX_ARRAY(u8, data);
	};
};

/*
 * The msg member must be at the end of the struct, as it's followed by the
 * message data.
 */
struct opal_prd_msg_queue_item {
	struct list_head	list;
	struct opal_prd_msg	msg;
};

static struct device_node *prd_node;
static LIST_HEAD(opal_prd_msg_queue);
static DEFINE_SPINLOCK(opal_prd_msg_queue_lock);
static DECLARE_WAIT_QUEUE_HEAD(opal_prd_msg_wait);
static atomic_t prd_usage;

static bool opal_prd_range_is_valid(uint64_t addr, uint64_t size)
{
	struct device_node *parent, *node;
	bool found;

	if (addr + size < addr)
		return false;

	parent = of_find_node_by_path("/reserved-memory");
	if (!parent)
		return false;

	found = false;

	for_each_child_of_node(parent, node) {
		uint64_t range_addr, range_size, range_end;
		const __be32 *addrp;
		const char *label;

		addrp = of_get_address(node, 0, &range_size, NULL);
		if (!addrp)
			continue;

		range_addr = of_read_number(addrp, 2);
		range_end = range_addr + range_size;

		label = of_get_property(node, "ibm,prd-label", NULL);

		/* PRD ranges need a label */
		if (!label)
			continue;

		if (range_end <= range_addr)
			continue;

		if (addr >= range_addr && addr + size <= range_end) {
			found = true;
			of_node_put(node);
			break;
		}
	}

	of_node_put(parent);
	return found;
}
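
/*
 * For illustration only (the node name, label string and cell layout below
 * are assumptions, not taken from this file): a /reserved-memory child that
 * opal_prd_range_is_valid() would accept might look like
 *
 *	hbrt-code-image@200000000 {
 *		reg = <0x2 0x00000000 0x0 0x00100000>;
 *		ibm,prd-label = "ibm,hbrt-code-image";
 *	};
 *
 * i.e. a two-cell address (matching the of_read_number(addrp, 2) above)
 * plus an ibm,prd-label property.
 */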

static int opal_prd_open(struct inode *inode, struct file *file)
{
	/*
	 * Prevent multiple (separate) processes from concurrent interactions
	 * with the FW PRD channel
	 */
	if (atomic_xchg(&prd_usage, 1) == 1)
		return -EBUSY;

	return 0;
}

/*
 * opal_prd_mmap - maps firmware-provided ranges into userspace
 * @file: file structure for the device
 * @vma: VMA to map the ranges into
 */
static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma)
{
	size_t addr, size;
	pgprot_t page_prot;

	pr_devel("opal_prd_mmap(0x%016lx, 0x%016lx, 0x%lx, 0x%lx)\n",
			vma->vm_start, vma->vm_end, vma->vm_pgoff,
			vma->vm_flags);

	addr = vma->vm_pgoff << PAGE_SHIFT;
	size = vma->vm_end - vma->vm_start;

	/* ensure we're mapping within one of the allowable ranges */
	if (!opal_prd_range_is_valid(addr, size))
		return -EINVAL;

	page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
					 size, vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size,
				page_prot);
}
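
/*
 * Userspace sketch (illustrative only, not part of this driver; "addr" and
 * "len" stand for a firmware-reserved range the caller already knows about):
 * the physical address of the range is passed as the mmap offset, which
 * arrives here in vma->vm_pgoff.
 *
 *	int fd = open("/dev/opal-prd", O_RDWR);
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, addr);
 */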

static bool opal_msg_queue_empty(void)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
	ret = list_empty(&opal_prd_msg_queue);
	spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags);

	return ret;
}

static __poll_t opal_prd_poll(struct file *file,
		struct poll_table_struct *wait)
{
	poll_wait(file, &opal_prd_msg_wait, wait);

	if (!opal_msg_queue_empty())
		return EPOLLIN | EPOLLRDNORM;

	return 0;
}

static ssize_t opal_prd_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct opal_prd_msg_queue_item *item;
	unsigned long flags;
	ssize_t size, err;
	int rc;

	/* we need at least a header's worth of data */
	if (count < sizeof(item->msg.header))
		return -EINVAL;

	if (*ppos)
		return -ESPIPE;

	item = NULL;

	for (;;) {
		spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
		if (!list_empty(&opal_prd_msg_queue)) {
			item = list_first_entry(&opal_prd_msg_queue,
					struct opal_prd_msg_queue_item, list);
			list_del(&item->list);
		}
		spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags);

		if (item)
			break;

		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		rc = wait_event_interruptible(opal_prd_msg_wait,
				!opal_msg_queue_empty());
		if (rc)
			return -EINTR;
	}

	size = be16_to_cpu(item->msg.header.size);
	if (size > count) {
		err = -EINVAL;
		goto err_requeue;
	}

	rc = copy_to_user(buf, &item->msg, size);
	if (rc) {
		err = -EFAULT;
		goto err_requeue;
	}

	kfree(item);

	return size;

err_requeue:
	/* eep! re-queue at the head of the list */
	spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
	list_add(&item->list, &opal_prd_msg_queue);
	spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags);
	return err;
}

static ssize_t opal_prd_write(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct opal_prd_msg_header hdr;
	struct opal_prd_msg *msg;
	ssize_t size;
	int rc;

	size = sizeof(hdr);

	if (count < size)
		return -EINVAL;

	/* grab the header */
	rc = copy_from_user(&hdr, buf, sizeof(hdr));
	if (rc)
		return -EFAULT;

	size = be16_to_cpu(hdr.size);

	msg = memdup_user(buf, size);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	rc = opal_prd_msg(msg);
	if (rc) {
		pr_warn("write: opal_prd_msg returned %d\n", rc);
		size = -EIO;
	}

	kfree(msg);

	return size;
}
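
/*
 * Illustrative userspace flow (a sketch only, not part of this driver; "fd",
 * "msg" and "buf" are assumed to come from the consumer): messages in both
 * directions are length-prefixed by the big-endian size field in their
 * header, so a consumer might do
 *
 *	write(fd, &msg, be16toh(msg.header.size));
 *	poll(&(struct pollfd){ .fd = fd, .events = POLLIN }, 1, -1);
 *	len = read(fd, buf, sizeof(buf));
 */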

static int opal_prd_release(struct inode *inode, struct file *file)
{
	struct opal_prd_msg msg;

	msg.header.size = cpu_to_be16(sizeof(msg));
	msg.header.type = OPAL_PRD_MSG_TYPE_FINI;

	opal_prd_msg(&msg);

	atomic_xchg(&prd_usage, 0);

	return 0;
}

static long opal_prd_ioctl(struct file *file, unsigned int cmd,
		unsigned long param)
{
	struct opal_prd_info info;
	struct opal_prd_scom scom;
	int rc = 0;

	switch (cmd) {
	case OPAL_PRD_GET_INFO:
		memset(&info, 0, sizeof(info));
		info.version = OPAL_PRD_KERNEL_VERSION;
		rc = copy_to_user((void __user *)param, &info, sizeof(info));
		if (rc)
			return -EFAULT;
		break;

	case OPAL_PRD_SCOM_READ:
		rc = copy_from_user(&scom, (void __user *)param, sizeof(scom));
		if (rc)
			return -EFAULT;

		scom.rc = opal_xscom_read(scom.chip, scom.addr,
				(__be64 *)&scom.data);
		scom.data = be64_to_cpu(scom.data);
		pr_devel("ioctl SCOM_READ: chip %llx addr %016llx data %016llx rc %lld\n",
				scom.chip, scom.addr, scom.data, scom.rc);

		rc = copy_to_user((void __user *)param, &scom, sizeof(scom));
		if (rc)
			return -EFAULT;
		break;

	case OPAL_PRD_SCOM_WRITE:
		rc = copy_from_user(&scom, (void __user *)param, sizeof(scom));
		if (rc)
			return -EFAULT;

		scom.rc = opal_xscom_write(scom.chip, scom.addr, scom.data);
		pr_devel("ioctl SCOM_WRITE: chip %llx addr %016llx data %016llx rc %lld\n",
				scom.chip, scom.addr, scom.data, scom.rc);

		rc = copy_to_user((void __user *)param, &scom, sizeof(scom));
		if (rc)
			return -EFAULT;
		break;

	default:
		rc = -EINVAL;
	}

	return rc;
}
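
/*
 * Illustrative userspace use of the SCOM ioctls (a sketch only; "fd",
 * "chip_id" and "scom_addr" are assumed to come from the consumer, and
 * use() is a placeholder). The ioctl return value reports copy/argument
 * failures, while scom.rc carries the firmware status:
 *
 *	struct opal_prd_scom scom = { .chip = chip_id, .addr = scom_addr };
 *
 *	if (!ioctl(fd, OPAL_PRD_SCOM_READ, &scom) && !scom.rc)
 *		use(scom.data);
 */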

static const struct file_operations opal_prd_fops = {
	.open		= opal_prd_open,
	.mmap		= opal_prd_mmap,
	.poll		= opal_prd_poll,
	.read		= opal_prd_read,
	.write		= opal_prd_write,
	.unlocked_ioctl	= opal_prd_ioctl,
	.release	= opal_prd_release,
	.owner		= THIS_MODULE,
};

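/* Registered with a dynamic minor; typically exposed as /dev/opal-prd */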
static struct miscdevice opal_prd_dev = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "opal-prd",
	.fops		= &opal_prd_fops,
};

/* opal interface */
static int opal_prd_msg_notifier(struct notifier_block *nb,
		unsigned long msg_type, void *_msg)
{
	struct opal_prd_msg_queue_item *item;
	struct opal_prd_msg_header *hdr;
	struct opal_msg *msg = _msg;
	int msg_size, item_size;
	unsigned long flags;

	if (msg_type != OPAL_MSG_PRD && msg_type != OPAL_MSG_PRD2)
		return 0;

	/*
	 * Calculate total size of the message and item we need to store. The
	 * 'size' field in the header includes the header itself.
	 */
	hdr = (void *)msg->params;
	msg_size = be16_to_cpu(hdr->size);
	item_size = msg_size + sizeof(*item) - sizeof(item->msg);

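	/* notifiers may be called in atomic context, so don't sleep here */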
	item = kzalloc(item_size, GFP_ATOMIC);
	if (!item)
		return -ENOMEM;

	memcpy(&item->msg.data, msg->params, msg_size);

	spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
	list_add_tail(&item->list, &opal_prd_msg_queue);
	spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags);

	wake_up_interruptible(&opal_prd_msg_wait);

	return 0;
}

static struct notifier_block opal_prd_event_nb = {
	.notifier_call	= opal_prd_msg_notifier,
	.next		= NULL,
	.priority	= 0,
};

static struct notifier_block opal_prd_event_nb2 = {
	.notifier_call	= opal_prd_msg_notifier,
	.next		= NULL,
	.priority	= 0,
};

static int opal_prd_probe(struct platform_device *pdev)
{
	int rc;

	if (!pdev || !pdev->dev.of_node)
		return -ENODEV;

	/*
	 * We should only have one prd driver instance per machine; ensure
	 * that we only get a valid probe on a single OF node.
	 */
	if (prd_node)
		return -EBUSY;

	prd_node = pdev->dev.of_node;

	rc = opal_message_notifier_register(OPAL_MSG_PRD, &opal_prd_event_nb);
	if (rc) {
		pr_err("Couldn't register event notifier\n");
		return rc;
	}

	rc = opal_message_notifier_register(OPAL_MSG_PRD2, &opal_prd_event_nb2);
	if (rc) {
		pr_err("Couldn't register PRD2 event notifier\n");
		opal_message_notifier_unregister(OPAL_MSG_PRD, &opal_prd_event_nb);
		return rc;
	}

	rc = misc_register(&opal_prd_dev);
	if (rc) {
		pr_err("failed to register miscdev\n");
		opal_message_notifier_unregister(OPAL_MSG_PRD,
				&opal_prd_event_nb);
		opal_message_notifier_unregister(OPAL_MSG_PRD2,
				&opal_prd_event_nb2);
		return rc;
	}

	return 0;
}

static int opal_prd_remove(struct platform_device *pdev)
{
	misc_deregister(&opal_prd_dev);
	opal_message_notifier_unregister(OPAL_MSG_PRD, &opal_prd_event_nb);
	opal_message_notifier_unregister(OPAL_MSG_PRD2, &opal_prd_event_nb2);
	return 0;
}

static const struct of_device_id opal_prd_match[] = {
	{ .compatible = "ibm,opal-prd" },
	{ },
};

static struct platform_driver opal_prd_driver = {
	.driver = {
		.name		= "opal-prd",
		.of_match_table	= opal_prd_match,
	},
	.probe	= opal_prd_probe,
	.remove	= opal_prd_remove,
};

module_platform_driver(opal_prd_driver);

MODULE_DEVICE_TABLE(of, opal_prd_match);
MODULE_DESCRIPTION("PowerNV OPAL runtime diagnostic driver");
MODULE_LICENSE("GPL");