xref: /linux/arch/powerpc/platforms/powernv/opal-prd.c (revision daa2be74b1b2302004945b2a5e32424e177cc7da)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OPAL Runtime Diagnostics interface driver
 * Supported on POWERNV platform
 *
 * Copyright IBM Corporation 2015
 */

#define pr_fmt(fmt) "opal-prd: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/poll.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/opal-prd.h>
#include <asm/opal.h>
#include <asm/io.h>
#include <linux/uaccess.h>


struct opal_prd_msg {
	union {
		struct opal_prd_msg_header header;
		DECLARE_FLEX_ARRAY(u8, data);
	};
};

/*
 * The msg member must be at the end of the struct, as it's followed by the
 * message data.
 */
struct opal_prd_msg_queue_item {
	struct list_head	list;
	struct opal_prd_msg	msg;
};

static struct device_node *prd_node;
static LIST_HEAD(opal_prd_msg_queue);
static DEFINE_SPINLOCK(opal_prd_msg_queue_lock);
static DECLARE_WAIT_QUEUE_HEAD(opal_prd_msg_wait);
static atomic_t prd_usage;

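/*
 * Check that the range [addr, addr + size) lies entirely within one of the
 * reserved-memory regions carrying an "ibm,prd-label" property; only those
 * firmware-labelled ranges may be mapped into userspace.
 */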
static bool opal_prd_range_is_valid(uint64_t addr, uint64_t size)
{
	struct device_node *parent, *node;
	bool found;

	if (addr + size < addr)
		return false;

	parent = of_find_node_by_path("/reserved-memory");
	if (!parent)
		return false;

	found = false;

	for_each_child_of_node(parent, node) {
		uint64_t range_addr, range_size, range_end;
		const __be32 *addrp;
		const char *label;

		addrp = of_get_address(node, 0, &range_size, NULL);
		if (!addrp)
			continue;

		range_addr = of_read_number(addrp, 2);
		range_end = range_addr + range_size;

		label = of_get_property(node, "ibm,prd-label", NULL);

		/* PRD ranges need a label */
		if (!label)
			continue;

		if (range_end <= range_addr)
			continue;

		if (addr >= range_addr && addr + size <= range_end) {
			found = true;
			of_node_put(node);
			break;
		}
	}

	of_node_put(parent);
	return found;
}

static int opal_prd_open(struct inode *inode, struct file *file)
{
	/*
	 * Prevent multiple (separate) processes from concurrent interactions
	 * with the FW PRD channel
	 */
	if (atomic_xchg(&prd_usage, 1) == 1)
		return -EBUSY;

	return 0;
}

/*
 * opal_prd_mmap - maps firmware-provided ranges into userspace
 * @file: file structure for the device
 * @vma: VMA to map the ranges into
 */

static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma)
{
	size_t addr, size;
	pgprot_t page_prot;

	pr_devel("opal_prd_mmap(0x%016lx, 0x%016lx, 0x%lx, 0x%lx)\n",
			vma->vm_start, vma->vm_end, vma->vm_pgoff,
			vma->vm_flags);

	addr = vma->vm_pgoff << PAGE_SHIFT;
	size = vma->vm_end - vma->vm_start;

	/* ensure we're mapping within one of the allowable ranges */
	if (!opal_prd_range_is_valid(addr, size))
		return -EINVAL;

	page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
					 size, vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size,
				page_prot);
}

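/* Test the message queue under its lock; used as a wait/poll condition. */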
static bool opal_msg_queue_empty(void)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
	ret = list_empty(&opal_prd_msg_queue);
	spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags);

	return ret;
}

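/* The device is readable whenever at least one firmware message is queued. */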
static __poll_t opal_prd_poll(struct file *file,
		struct poll_table_struct *wait)
{
	poll_wait(file, &opal_prd_msg_wait, wait);

	if (!opal_msg_queue_empty())
		return EPOLLIN | EPOLLRDNORM;

	return 0;
}

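/*
 * Hand a single queued firmware message to userspace. Block until a message
 * arrives unless O_NONBLOCK is set; if the user buffer is too small or the
 * copy-out faults, re-queue the message at the head of the list so it isn't
 * lost.
 */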
static ssize_t opal_prd_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct opal_prd_msg_queue_item *item;
	unsigned long flags;
	ssize_t size, err;
	int rc;

	/* we need at least a header's worth of data */
	if (count < sizeof(item->msg.header))
		return -EINVAL;

	if (*ppos)
		return -ESPIPE;

	item = NULL;

	for (;;) {

		spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
		if (!list_empty(&opal_prd_msg_queue)) {
			item = list_first_entry(&opal_prd_msg_queue,
					struct opal_prd_msg_queue_item, list);
			list_del(&item->list);
		}
		spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags);

		if (item)
			break;

		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		rc = wait_event_interruptible(opal_prd_msg_wait,
				!opal_msg_queue_empty());
		if (rc)
			return -EINTR;
	}

	size = be16_to_cpu(item->msg.header.size);
	if (size > count) {
		err = -EINVAL;
		goto err_requeue;
	}

	rc = copy_to_user(buf, &item->msg, size);
	if (rc) {
		err = -EFAULT;
		goto err_requeue;
	}

	kfree(item);

	return size;

err_requeue:
	/* eep! re-queue at the head of the list */
	spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
	list_add(&item->list, &opal_prd_msg_queue);
	spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags);
	return err;
}

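/*
 * Pass a message from userspace to firmware. The amount of data copied is
 * taken from the size field of the opal_prd_msg_header at the start of the
 * buffer, and the whole message is forwarded via opal_prd_msg().
 */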
static ssize_t opal_prd_write(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct opal_prd_msg_header hdr;
	struct opal_prd_msg *msg;
	ssize_t size;
	int rc;

	size = sizeof(hdr);

	if (count < size)
		return -EINVAL;

	/* grab the header */
	rc = copy_from_user(&hdr, buf, sizeof(hdr));
	if (rc)
		return -EFAULT;

	size = be16_to_cpu(hdr.size);

	msg = memdup_user(buf, size);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	rc = opal_prd_msg(msg);
	if (rc) {
		pr_warn("write: opal_prd_msg returned %d\n", rc);
		size = -EIO;
	}

	kfree(msg);

	return size;
}

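/*
 * On close, tell firmware that the PRD interface is finished with an
 * OPAL_PRD_MSG_TYPE_FINI message, then drop the single-user claim taken in
 * opal_prd_open().
 */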
static int opal_prd_release(struct inode *inode, struct file *file)
{
	struct opal_prd_msg msg;

	msg.header.size = cpu_to_be16(sizeof(msg));
	msg.header.type = OPAL_PRD_MSG_TYPE_FINI;

	opal_prd_msg(&msg);

	atomic_xchg(&prd_usage, 0);

	return 0;
}

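/*
 * ioctl interface: OPAL_PRD_GET_INFO reports the kernel-side API version;
 * OPAL_PRD_SCOM_READ and OPAL_PRD_SCOM_WRITE perform SCOM register accesses
 * on behalf of userspace via the OPAL xscom calls.
 */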
static long opal_prd_ioctl(struct file *file, unsigned int cmd,
		unsigned long param)
{
	struct opal_prd_info info;
	struct opal_prd_scom scom;
	int rc = 0;

	switch (cmd) {
	case OPAL_PRD_GET_INFO:
		memset(&info, 0, sizeof(info));
		info.version = OPAL_PRD_KERNEL_VERSION;
		rc = copy_to_user((void __user *)param, &info, sizeof(info));
		if (rc)
			return -EFAULT;
		break;

	case OPAL_PRD_SCOM_READ:
		rc = copy_from_user(&scom, (void __user *)param, sizeof(scom));
		if (rc)
			return -EFAULT;

		scom.rc = opal_xscom_read(scom.chip, scom.addr,
				(__be64 *)&scom.data);
		scom.data = be64_to_cpu(scom.data);
		pr_devel("ioctl SCOM_READ: chip %llx addr %016llx data %016llx rc %lld\n",
				scom.chip, scom.addr, scom.data, scom.rc);

		rc = copy_to_user((void __user *)param, &scom, sizeof(scom));
		if (rc)
			return -EFAULT;
		break;

	case OPAL_PRD_SCOM_WRITE:
		rc = copy_from_user(&scom, (void __user *)param, sizeof(scom));
		if (rc)
			return -EFAULT;

		scom.rc = opal_xscom_write(scom.chip, scom.addr, scom.data);
		pr_devel("ioctl SCOM_WRITE: chip %llx addr %016llx data %016llx rc %lld\n",
				scom.chip, scom.addr, scom.data, scom.rc);

		rc = copy_to_user((void __user *)param, &scom, sizeof(scom));
		if (rc)
			return -EFAULT;
		break;

	default:
		rc = -EINVAL;
	}

	return rc;
}

static const struct file_operations opal_prd_fops = {
	.open		= opal_prd_open,
	.mmap		= opal_prd_mmap,
	.poll		= opal_prd_poll,
	.read		= opal_prd_read,
	.write		= opal_prd_write,
	.unlocked_ioctl	= opal_prd_ioctl,
	.release	= opal_prd_release,
	.owner		= THIS_MODULE,
};

static struct miscdevice opal_prd_dev = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "opal-prd",
	.fops		= &opal_prd_fops,
};
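
/*
 * Illustrative userspace sketch (not part of this driver): a client such as
 * a hardware-diagnostics daemon would typically open the misc device
 * registered above and query the interface version before using read(),
 * write(), mmap() and the SCOM ioctls. The "/dev/opal-prd" path assumes the
 * usual udev naming for misc devices; error handling is simplified.
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <asm/opal-prd.h>
 *
 *	int main(void)
 *	{
 *		struct opal_prd_info info = { 0 };
 *		int fd = open("/dev/opal-prd", O_RDWR);
 *
 *		if (fd < 0 || ioctl(fd, OPAL_PRD_GET_INFO, &info))
 *			return 1;
 *		printf("opal-prd API version %llu\n",
 *		       (unsigned long long)info.version);
 *		return 0;
 *	}
 */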

/* opal interface */
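/*
 * Notifier callback for OPAL_MSG_PRD and OPAL_MSG_PRD2 messages: copy the
 * incoming message into a freshly-allocated queue item, append it to the
 * message queue and wake any waiting readers or pollers.
 */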
static int opal_prd_msg_notifier(struct notifier_block *nb,
		unsigned long msg_type, void *_msg)
{
	struct opal_prd_msg_queue_item *item;
	struct opal_prd_msg_header *hdr;
	struct opal_msg *msg = _msg;
	int msg_size, item_size;
	unsigned long flags;

	if (msg_type != OPAL_MSG_PRD && msg_type != OPAL_MSG_PRD2)
		return 0;

	/*
	 * Calculate total size of the message and item we need to store. The
	 * 'size' field in the header includes the header itself.
	 */
	hdr = (void *)msg->params;
	msg_size = be16_to_cpu(hdr->size);
	item_size = msg_size + sizeof(*item) - sizeof(item->msg);

	item = kzalloc(item_size, GFP_ATOMIC);
	if (!item)
		return -ENOMEM;

	memcpy(&item->msg.data, msg->params, msg_size);

	spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
	list_add_tail(&item->list, &opal_prd_msg_queue);
	spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags);

	wake_up_interruptible(&opal_prd_msg_wait);

	return 0;
}

static struct notifier_block opal_prd_event_nb = {
	.notifier_call	= opal_prd_msg_notifier,
	.next		= NULL,
	.priority	= 0,
};

static struct notifier_block opal_prd_event_nb2 = {
	.notifier_call	= opal_prd_msg_notifier,
	.next		= NULL,
	.priority	= 0,
};

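/*
 * Bind to the "ibm,opal-prd" platform device: register notifiers for the PRD
 * and PRD2 OPAL message types and expose the opal-prd misc device. Only one
 * instance per machine is supported.
 */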
static int opal_prd_probe(struct platform_device *pdev)
{
	int rc;

	if (!pdev || !pdev->dev.of_node)
		return -ENODEV;

	/*
	 * We should only have one prd driver instance per machine; ensure
	 * that we only get a valid probe on a single OF node.
	 */
	if (prd_node)
		return -EBUSY;

	prd_node = pdev->dev.of_node;

	rc = opal_message_notifier_register(OPAL_MSG_PRD, &opal_prd_event_nb);
	if (rc) {
		pr_err("Couldn't register event notifier\n");
		return rc;
	}

	rc = opal_message_notifier_register(OPAL_MSG_PRD2, &opal_prd_event_nb2);
	if (rc) {
		pr_err("Couldn't register PRD2 event notifier\n");
		opal_message_notifier_unregister(OPAL_MSG_PRD, &opal_prd_event_nb);
		return rc;
	}

	rc = misc_register(&opal_prd_dev);
	if (rc) {
		pr_err("failed to register miscdev\n");
		opal_message_notifier_unregister(OPAL_MSG_PRD,
				&opal_prd_event_nb);
		opal_message_notifier_unregister(OPAL_MSG_PRD2,
				&opal_prd_event_nb2);
		return rc;
	}

	return 0;
}

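/* Tear down in reverse: remove the misc device, then drop both notifiers. */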
static void opal_prd_remove(struct platform_device *pdev)
{
	misc_deregister(&opal_prd_dev);
	opal_message_notifier_unregister(OPAL_MSG_PRD, &opal_prd_event_nb);
	opal_message_notifier_unregister(OPAL_MSG_PRD2, &opal_prd_event_nb2);
}

static const struct of_device_id opal_prd_match[] = {
	{ .compatible = "ibm,opal-prd" },
	{ },
};

static struct platform_driver opal_prd_driver = {
	.driver = {
		.name		= "opal-prd",
		.of_match_table	= opal_prd_match,
	},
	.probe	= opal_prd_probe,
	.remove_new = opal_prd_remove,
};

module_platform_driver(opal_prd_driver);

MODULE_DEVICE_TABLE(of, opal_prd_match);
MODULE_DESCRIPTION("PowerNV OPAL runtime diagnostic driver");
MODULE_LICENSE("GPL");