xref: /linux/drivers/vfio/platform/vfio_platform_common.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
/*
 * Copyright (C) 2013 - Virtual Open Systems
 * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>

#include "vfio_platform_private.h"

static DEFINE_MUTEX(driver_lock);

static const struct vfio_platform_reset_combo reset_lookup_table[] = {
	{
		.compat = "calxeda,hb-xgmac",
		.reset_function_name = "vfio_platform_calxedaxgmac_reset",
		.module_name = "vfio-platform-calxedaxgmac",
	},
};

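/*
 * Look up an optional reset handler for the device by matching its
 * "compatible" string against the table above. The handler module is
 * loaded on demand and its symbol pinned via __symbol_get(); the
 * reference is dropped again in vfio_platform_put_reset(). Devices
 * without a match simply have no reset support.
 */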
static void vfio_platform_get_reset(struct vfio_platform_device *vdev,
				    struct device *dev)
{
	const char *compat;
	int (*reset)(struct vfio_platform_device *);
	int ret, i;

	ret = device_property_read_string(dev, "compatible", &compat);
	if (ret)
		return;

	for (i = 0; i < ARRAY_SIZE(reset_lookup_table); i++) {
		if (!strcmp(reset_lookup_table[i].compat, compat)) {
			request_module(reset_lookup_table[i].module_name);
			reset = __symbol_get(
				reset_lookup_table[i].reset_function_name);
			if (reset) {
				vdev->reset = reset;
				return;
			}
		}
	}
}

static void vfio_platform_put_reset(struct vfio_platform_device *vdev)
{
	if (vdev->reset)
		symbol_put_addr(vdev->reset);
}

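/*
 * Count the device's resources, then describe each one as a VFIO
 * region. MMIO resources are always readable, writable unless marked
 * read-only, and mmap-able only when both their base address and size
 * are page aligned, so that a user mapping cannot reach neighbouring
 * registers.
 */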
static int vfio_platform_regions_init(struct vfio_platform_device *vdev)
{
	int cnt = 0, i;

	while (vdev->get_resource(vdev, cnt))
		cnt++;

	vdev->regions = kcalloc(cnt, sizeof(struct vfio_platform_region),
				GFP_KERNEL);
	if (!vdev->regions)
		return -ENOMEM;

	for (i = 0; i < cnt; i++) {
		struct resource *res = vdev->get_resource(vdev, i);

		if (!res)
			goto err;

		vdev->regions[i].addr = res->start;
		vdev->regions[i].size = resource_size(res);
		vdev->regions[i].flags = 0;

		switch (resource_type(res)) {
		case IORESOURCE_MEM:
			vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_MMIO;
			vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
			if (!(res->flags & IORESOURCE_READONLY))
				vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_WRITE;

			/*
			 * Only regions addressed with PAGE granularity may be
			 * MMAPed securely.
			 */
			if (!(vdev->regions[i].addr & ~PAGE_MASK) &&
					!(vdev->regions[i].size & ~PAGE_MASK))
				vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_MMAP;

			break;
		case IORESOURCE_IO:
			vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_PIO;
			break;
		default:
			goto err;
		}
	}

	vdev->num_regions = cnt;

	return 0;
err:
	kfree(vdev->regions);
	return -EINVAL;
}

static void vfio_platform_regions_cleanup(struct vfio_platform_device *vdev)
{
	int i;

	for (i = 0; i < vdev->num_regions; i++)
		iounmap(vdev->regions[i].ioaddr);

	vdev->num_regions = 0;
	kfree(vdev->regions);
}

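/*
 * Called when a file descriptor for the device is closed. Dropping the
 * last reference resets the device (if a reset handler is available)
 * and tears down region and interrupt state, so the next open starts
 * from a clean slate.
 */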
static void vfio_platform_release(void *device_data)
{
	struct vfio_platform_device *vdev = device_data;

	mutex_lock(&driver_lock);

	if (!(--vdev->refcnt)) {
		if (vdev->reset)
			vdev->reset(vdev);
		vfio_platform_regions_cleanup(vdev);
		vfio_platform_irq_cleanup(vdev);
	}

	mutex_unlock(&driver_lock);

	module_put(THIS_MODULE);
}

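/*
 * Called on each open of the device file descriptor. The first opener
 * sets up regions and interrupts and resets the device; later openers
 * only bump the reference count. driver_lock serialises open and
 * release against each other.
 */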
static int vfio_platform_open(void *device_data)
{
	struct vfio_platform_device *vdev = device_data;
	int ret;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	mutex_lock(&driver_lock);

	if (!vdev->refcnt) {
		ret = vfio_platform_regions_init(vdev);
		if (ret)
			goto err_reg;

		ret = vfio_platform_irq_init(vdev);
		if (ret)
			goto err_irq;

		if (vdev->reset)
			vdev->reset(vdev);
	}

	vdev->refcnt++;

	mutex_unlock(&driver_lock);
	return 0;

err_irq:
	vfio_platform_regions_cleanup(vdev);
err_reg:
	mutex_unlock(&driver_lock);
	module_put(THIS_MODULE);
	return ret;
}

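/*
 * Dispatch the standard VFIO device ioctls: device/region/IRQ info
 * queries, interrupt configuration (VFIO_DEVICE_SET_IRQS) and device
 * reset. Each handler copies in only the part of the user structure
 * it understands (minsz), keeping compatibility with newer userspace
 * that passes a larger argsz.
 */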
static long vfio_platform_ioctl(void *device_data,
				unsigned int cmd, unsigned long arg)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned long minsz;

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (vdev->reset)
			vdev->flags |= VFIO_DEVICE_FLAGS_RESET;
		info.flags = vdev->flags;
		info.num_regions = vdev->num_regions;
		info.num_irqs = vdev->num_irqs;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= vdev->num_regions)
			return -EINVAL;

		/* map the region index to a fixed offset in the device fd */
		info.offset = VFIO_PLATFORM_INDEX_TO_OFFSET(info.index);
		info.size = vdev->regions[info.index].size;
		info.flags = vdev->regions[info.index].flags;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= vdev->num_irqs)
			return -EINVAL;

		info.flags = vdev->irqs[info.index].flags;
		info.count = vdev->irqs[info.index].count;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz)
			return -EINVAL;

		if (hdr.index >= vdev->num_irqs)
			return -EINVAL;

		if (hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
				  VFIO_IRQ_SET_ACTION_TYPE_MASK))
			return -EINVAL;

		if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
			size_t size;

			if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
				size = sizeof(uint8_t);
			else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
				size = sizeof(int32_t);
			else
				return -EINVAL;

			if (hdr.argsz - minsz < size)
				return -EINVAL;

			data = memdup_user((void __user *)(arg + minsz), size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);

		ret = vfio_platform_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
						   hdr.start, hdr.count, data);
		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;

	} else if (cmd == VFIO_DEVICE_RESET) {
		if (vdev->reset)
			return vdev->reset(vdev);
		else
			return -EINVAL;
	}

	return -ENOTTY;
}

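/*
 * Read from an MMIO region on behalf of userspace, using the widest
 * naturally aligned access possible (4/2/1 bytes). The region is
 * ioremapped lazily on first access; the mapping is kept in the region
 * descriptor and torn down in vfio_platform_regions_cleanup().
 */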
static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg,
				       char __user *buf, size_t count,
				       loff_t off)
{
	unsigned int done = 0;

	if (!reg->ioaddr) {
		reg->ioaddr = ioremap_nocache(reg->addr, reg->size);
		if (!reg->ioaddr)
			return -ENOMEM;
	}

	while (count) {
		size_t filled;

		if (count >= 4 && !(off % 4)) {
			u32 val;

			val = ioread32(reg->ioaddr + off);
			if (copy_to_user(buf, &val, 4))
				goto err;

			filled = 4;
		} else if (count >= 2 && !(off % 2)) {
			u16 val;

			val = ioread16(reg->ioaddr + off);
			if (copy_to_user(buf, &val, 2))
				goto err;

			filled = 2;
		} else {
			u8 val;

			val = ioread8(reg->ioaddr + off);
			if (copy_to_user(buf, &val, 1))
				goto err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		off += filled;
		buf += filled;
	}

	return done;
err:
	return -EFAULT;
}

static ssize_t vfio_platform_read(void *device_data, char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;

	if (index >= vdev->num_regions)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ))
		return -EINVAL;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_read_mmio(&vdev->regions[index],
					       buf, count, off);
	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL; /* not implemented */

	return -EINVAL;
}

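/*
 * Write to an MMIO region on behalf of userspace. Mirrors
 * vfio_platform_read_mmio(): widest aligned access, lazy ioremap of
 * the region on first use.
 */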
static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg,
					const char __user *buf, size_t count,
					loff_t off)
{
	unsigned int done = 0;

	if (!reg->ioaddr) {
		reg->ioaddr = ioremap_nocache(reg->addr, reg->size);
		if (!reg->ioaddr)
			return -ENOMEM;
	}

	while (count) {
		size_t filled;

		if (count >= 4 && !(off % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, 4))
				goto err;
			iowrite32(val, reg->ioaddr + off);

			filled = 4;
		} else if (count >= 2 && !(off % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, 2))
				goto err;
			iowrite16(val, reg->ioaddr + off);

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, 1))
				goto err;
			iowrite8(val, reg->ioaddr + off);

			filled = 1;
		}

		count -= filled;
		done += filled;
		off += filled;
		buf += filled;
	}

	return done;
err:
	return -EFAULT;
}

static ssize_t vfio_platform_write(void *device_data, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;

	if (index >= vdev->num_regions)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE))
		return -EINVAL;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_write_mmio(&vdev->regions[index],
						buf, count, off);
	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL; /* not implemented */

	return -EINVAL;
}

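/*
 * Back a userspace mapping directly with the region's physical address
 * range. The file offset encodes the region index in its top bits
 * (VFIO_PLATFORM_OFFSET_SHIFT), so only the low bits of vm_pgoff
 * contribute to the page offset within the region.
 */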
static int vfio_platform_mmap_mmio(struct vfio_platform_region region,
				   struct vm_area_struct *vma)
{
	u64 req_len, pgoff, req_start;

	req_len = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (region.size < PAGE_SIZE || req_start + req_len > region.size)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       req_len, vma->vm_page_prot);
}

static int vfio_platform_mmap(void *device_data, struct vm_area_struct *vma)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned int index;

	index = vma->vm_pgoff >> (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
	if (index >= vdev->num_regions)
		return -EINVAL;
	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;
	if (vma->vm_end & ~PAGE_MASK)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
			&& (vma->vm_flags & VM_READ))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
			&& (vma->vm_flags & VM_WRITE))
		return -EINVAL;

	vma->vm_private_data = vdev;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_mmap_mmio(vdev->regions[index], vma);
	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL; /* not implemented */

	return -EINVAL;
}

static const struct vfio_device_ops vfio_platform_ops = {
	.name		= "vfio-platform",
	.open		= vfio_platform_open,
	.release	= vfio_platform_release,
	.ioctl		= vfio_platform_ioctl,
	.read		= vfio_platform_read,
	.write		= vfio_platform_write,
	.mmap		= vfio_platform_mmap,
};

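/*
 * Common probe path for the bus-specific VFIO platform drivers. A
 * device is only accepted if it belongs to an IOMMU group, since VFIO
 * relies on the IOMMU for isolation; the optional reset handler is
 * looked up afterwards.
 */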
int vfio_platform_probe_common(struct vfio_platform_device *vdev,
			       struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (!vdev)
		return -EINVAL;

	group = iommu_group_get(dev);
	if (!group) {
		pr_err("VFIO: No IOMMU group for device %s\n", vdev->name);
		return -EINVAL;
	}

	ret = vfio_add_group_dev(dev, &vfio_platform_ops, vdev);
	if (ret) {
		iommu_group_put(group);
		return ret;
	}

	vfio_platform_get_reset(vdev, dev);

	mutex_init(&vdev->igate);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_platform_probe_common);

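/*
 * Common remove path: detach the device from VFIO, drop the reset
 * handler reference and release the IOMMU group taken at probe time.
 */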
struct vfio_platform_device *vfio_platform_remove_common(struct device *dev)
{
	struct vfio_platform_device *vdev;

	vdev = vfio_del_group_dev(dev);

	if (vdev) {
		vfio_platform_put_reset(vdev);
		iommu_group_put(dev->iommu_group);
	}

	return vdev;
}
EXPORT_SYMBOL_GPL(vfio_platform_remove_common);