// SPDX-License-Identifier: GPL-2.0
/*
 * Virtio driver for the paravirtualized IOMMU
 *
 * Copyright (C) 2019 Arm Limited
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/delay.h>
#include <linux/dma-map-ops.h>
#include <linux/freezer.h>
#include <linux/interval_tree.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
#include <linux/wait.h>

#include <uapi/linux/virtio_iommu.h>

#include "dma-iommu.h"

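/* Software-mapped MSI region, used when the device provides no MSI window of its own (see viommu_get_resv_regions()) */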
#define MSI_IOVA_BASE		0x8000000
#define MSI_IOVA_LENGTH		0x100000

#define VIOMMU_REQUEST_VQ	0
#define VIOMMU_EVENT_VQ		1
#define VIOMMU_NR_VQS		2

struct viommu_dev {
	struct iommu_device iommu;
	struct device *dev;
	struct virtio_device *vdev;

	struct ida domain_ids;

	struct virtqueue *vqs[VIOMMU_NR_VQS];
	spinlock_t request_lock;
	struct list_head requests;
	void *evts;

	/* Device configuration */
	struct iommu_domain_geometry geometry;
	u64 pgsize_bitmap;
	u32 first_domain;
	u32 last_domain;
	u32 identity_domain_id;
	/* Supported MAP flags */
	u32 map_flags;
	u32 probe_size;
};

struct viommu_mapping {
	phys_addr_t paddr;
	struct interval_tree_node iova;
	u32 flags;
};

struct viommu_domain {
	struct iommu_domain domain;
	struct viommu_dev *viommu;
	unsigned int id;
	u32 map_flags;

	spinlock_t mappings_lock;
	struct rb_root_cached mappings;

	unsigned long nr_endpoints;
};

struct viommu_endpoint {
	struct device *dev;
	struct viommu_dev *viommu;
	struct viommu_domain *vdomain;
	struct list_head resv_regions;
};

struct viommu_request {
	struct list_head list;
	void *writeback;
	unsigned int write_offset;
	unsigned int len;
	char buf[] __counted_by(len);
};

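/* Events with any of these bits set in their head are not treated as fault reports */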
#define VIOMMU_FAULT_RESV_MASK		0xffffff00

struct viommu_event {
	union {
		u32 head;
		struct virtio_iommu_fault fault;
	};
};

static struct viommu_domain viommu_identity_domain;

#define to_viommu_domain(domain)	\
	container_of(domain, struct viommu_domain, domain)

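/* Convert a virtio-iommu request status into an errno */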
static int viommu_get_req_errno(void *buf, size_t len)
{
	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);

	switch (tail->status) {
	case VIRTIO_IOMMU_S_OK:
		return 0;
	case VIRTIO_IOMMU_S_UNSUPP:
		return -ENOSYS;
	case VIRTIO_IOMMU_S_INVAL:
		return -EINVAL;
	case VIRTIO_IOMMU_S_RANGE:
		return -ERANGE;
	case VIRTIO_IOMMU_S_NOENT:
		return -ENOENT;
	case VIRTIO_IOMMU_S_FAULT:
		return -EFAULT;
	case VIRTIO_IOMMU_S_NOMEM:
		return -ENOMEM;
	case VIRTIO_IOMMU_S_IOERR:
	case VIRTIO_IOMMU_S_DEVERR:
	default:
		return -EIO;
	}
}

static void viommu_set_req_status(void *buf, size_t len, int status)
{
	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);

	tail->status = status;
}

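/*
 * A request is made of a device-readable head followed by a device-writable
 * part: the status tail, preceded by the returned properties in the case of a
 * PROBE request. Return the offset at which the device-writable part starts.
 */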
static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
					  struct virtio_iommu_req_head *req,
					  size_t len)
{
	size_t tail_size = sizeof(struct virtio_iommu_req_tail);

	if (req->type == VIRTIO_IOMMU_T_PROBE)
		return len - viommu->probe_size - tail_size;

	return len - tail_size;
}

/*
 * __viommu_sync_req - Complete all in-flight requests
 *
 * Wait for all added requests to complete. When this function returns, all
 * requests that were in-flight at the time of the call have completed.
 */
static int __viommu_sync_req(struct viommu_dev *viommu)
{
	unsigned int len;
	size_t write_len;
	struct viommu_request *req;
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	virtqueue_kick(vq);

	while (!list_empty(&viommu->requests)) {
		len = 0;
		req = virtqueue_get_buf(vq, &len);
		if (!req)
			continue;

		if (!len)
			viommu_set_req_status(req->buf, req->len,
					      VIRTIO_IOMMU_S_IOERR);

		write_len = req->len - req->write_offset;
		if (req->writeback && len == write_len)
			memcpy(req->writeback, req->buf + req->write_offset,
			       write_len);

		list_del(&req->list);
		kfree(req);
	}

	return 0;
}

static int viommu_sync_req(struct viommu_dev *viommu)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_sync_req(viommu);
	if (ret)
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}

/*
 * __viommu_add_request - Add one request to the queue
 * @buf: pointer to the request buffer
 * @len: length of the request buffer
 * @writeback: copy data back to the buffer when the request completes.
 *
 * Add a request to the queue. Only synchronize the queue if it's already full.
 * Otherwise don't kick the queue nor wait for requests to complete.
 *
 * When @writeback is true, data written by the device, including the request
 * status, is copied into @buf after the request completes. This is unsafe if
 * the caller allocates @buf on stack and drops the lock between add_req() and
 * sync_req().
 *
 * Return 0 if the request was successfully added to the queue.
 */
static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len,
			    bool writeback)
{
	int ret;
	off_t write_offset;
	struct viommu_request *req;
	struct scatterlist top_sg, bottom_sg;
	struct scatterlist *sg[2] = { &top_sg, &bottom_sg };
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	write_offset = viommu_get_write_desc_offset(viommu, buf, len);
	if (write_offset <= 0)
		return -EINVAL;

	req = kzalloc(struct_size(req, buf, len), GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->len = len;
	if (writeback) {
		req->writeback = buf + write_offset;
		req->write_offset = write_offset;
	}
	memcpy(&req->buf, buf, write_offset);

	sg_init_one(&top_sg, req->buf, write_offset);
	sg_init_one(&bottom_sg, req->buf + write_offset, len - write_offset);

	ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		/* If the queue is full, sync and retry */
		if (!__viommu_sync_req(viommu))
			ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	}
	if (ret)
		goto err_free;

	list_add_tail(&req->list, &viommu->requests);
	return 0;

err_free:
	kfree(req);
	return ret;
}

static int viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_add_req(viommu, buf, len, false);
	if (ret)
		dev_dbg(viommu->dev, "could not add request: %d\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}

/*
 * Send a request and wait for it to complete. Return the request status (as an
 * errno)
 */
static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
				size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);

	ret = __viommu_add_req(viommu, buf, len, true);
	if (ret) {
		dev_dbg(viommu->dev, "could not add request (%d)\n", ret);
		goto out_unlock;
	}

	ret = __viommu_sync_req(viommu);
	if (ret) {
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
		/* Fall-through (get the actual request status) */
	}

	ret = viommu_get_req_errno(buf, len);
out_unlock:
	spin_unlock_irqrestore(&viommu->request_lock, flags);
	return ret;
}

static int viommu_send_attach_req(struct viommu_dev *viommu, struct device *dev,
				  struct virtio_iommu_req_attach *req)
{
	int ret;
	unsigned int i;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	for (i = 0; i < fwspec->num_ids; i++) {
		req->endpoint = cpu_to_le32(fwspec->ids[i]);
		ret = viommu_send_req_sync(viommu, req, sizeof(*req));
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * viommu_add_mapping - add a mapping to the internal tree
 *
 * On success, return 0. Otherwise return -ENOMEM.
 */
static int viommu_add_mapping(struct viommu_domain *vdomain, u64 iova, u64 end,
			      phys_addr_t paddr, u32 flags)
{
	unsigned long irqflags;
	struct viommu_mapping *mapping;

	mapping = kzalloc(sizeof(*mapping), GFP_ATOMIC);
	if (!mapping)
		return -ENOMEM;

	mapping->paddr = paddr;
	mapping->iova.start = iova;
	mapping->iova.last = end;
	mapping->flags = flags;

	spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
	interval_tree_insert(&mapping->iova, &vdomain->mappings);
	spin_unlock_irqrestore(&vdomain->mappings_lock, irqflags);

	return 0;
}

/*
 * viommu_del_mappings - remove mappings from the internal tree
 *
 * @vdomain: the domain
 * @iova: start of the range
 * @end: end of the range
 *
 * On success, returns the number of unmapped bytes
 */
static size_t viommu_del_mappings(struct viommu_domain *vdomain,
				  u64 iova, u64 end)
{
	size_t unmapped = 0;
	unsigned long flags;
	struct viommu_mapping *mapping = NULL;
	struct interval_tree_node *node, *next;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	next = interval_tree_iter_first(&vdomain->mappings, iova, end);
	while (next) {
		node = next;
		mapping = container_of(node, struct viommu_mapping, iova);
		next = interval_tree_iter_next(node, iova, end);

		/* Trying to split a mapping? */
		if (mapping->iova.start < iova)
			break;

		/*
		 * Virtio-iommu doesn't allow UNMAP to split a mapping created
		 * with a single MAP request, so remove the full mapping.
		 */
		unmapped += mapping->iova.last - mapping->iova.start + 1;

		interval_tree_remove(node, &vdomain->mappings);
		kfree(mapping);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return unmapped;
}

/*
 * Fill the domain with identity mappings, skipping the device's reserved
 * regions.
 */
static int viommu_domain_map_identity(struct viommu_endpoint *vdev,
				      struct viommu_domain *vdomain)
{
	int ret;
	struct iommu_resv_region *resv;
	u64 iova = vdomain->domain.geometry.aperture_start;
	u64 limit = vdomain->domain.geometry.aperture_end;
	u32 flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
	unsigned long granule = 1UL << __ffs(vdomain->domain.pgsize_bitmap);

	iova = ALIGN(iova, granule);
	limit = ALIGN_DOWN(limit + 1, granule) - 1;

	list_for_each_entry(resv, &vdev->resv_regions, list) {
		u64 resv_start = ALIGN_DOWN(resv->start, granule);
		u64 resv_end = ALIGN(resv->start + resv->length, granule) - 1;

		if (resv_end < iova || resv_start > limit)
			/* No overlap */
			continue;

		if (resv_start > iova) {
			ret = viommu_add_mapping(vdomain, iova, resv_start - 1,
						 (phys_addr_t)iova, flags);
			if (ret)
				goto err_unmap;
		}

		if (resv_end >= limit)
			return 0;

		iova = resv_end + 1;
	}

	ret = viommu_add_mapping(vdomain, iova, limit, (phys_addr_t)iova,
				 flags);
	if (ret)
		goto err_unmap;
	return 0;

err_unmap:
	viommu_del_mappings(vdomain, 0, iova);
	return ret;
}

/*
 * viommu_replay_mappings - re-send MAP requests
 *
 * When reattaching a domain that was previously detached from all endpoints,
 * mappings were deleted from the device. Re-create the mappings available in
 * the internal tree.
 */
static int viommu_replay_mappings(struct viommu_domain *vdomain)
{
	int ret = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct virtio_iommu_req_map map;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL);
	while (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		map = (struct virtio_iommu_req_map) {
			.head.type = VIRTIO_IOMMU_T_MAP,
			.domain = cpu_to_le32(vdomain->id),
			.virt_start = cpu_to_le64(mapping->iova.start),
			.virt_end = cpu_to_le64(mapping->iova.last),
			.phys_start = cpu_to_le64(mapping->paddr),
			.flags = cpu_to_le32(mapping->flags),
		};

		ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
		if (ret)
			break;

		node = interval_tree_iter_next(node, 0, -1UL);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return ret;
}

static int viommu_add_resv_mem(struct viommu_endpoint *vdev,
			       struct virtio_iommu_probe_resv_mem *mem,
			       size_t len)
{
	size_t size;
	u64 start64, end64;
	phys_addr_t start, end;
	struct iommu_resv_region *region = NULL, *next;
	unsigned long prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	start = start64 = le64_to_cpu(mem->start);
	end = end64 = le64_to_cpu(mem->end);
	size = end64 - start64 + 1;

	/* Catch any overflow, including the unlikely end64 - start64 + 1 = 0 */
	if (start != start64 || end != end64 || size < end64 - start64)
		return -EOVERFLOW;

	if (len < sizeof(*mem))
		return -EINVAL;

	switch (mem->subtype) {
	default:
		dev_warn(vdev->dev, "unknown resv mem subtype 0x%x\n",
			 mem->subtype);
		fallthrough;
	case VIRTIO_IOMMU_RESV_MEM_T_RESERVED:
		region = iommu_alloc_resv_region(start, size, 0,
						 IOMMU_RESV_RESERVED,
						 GFP_KERNEL);
		break;
	case VIRTIO_IOMMU_RESV_MEM_T_MSI:
		region = iommu_alloc_resv_region(start, size, prot,
						 IOMMU_RESV_MSI,
						 GFP_KERNEL);
		break;
	}
	if (!region)
		return -ENOMEM;

	/* Keep the list sorted */
	list_for_each_entry(next, &vdev->resv_regions, list) {
		if (next->start > region->start)
			break;
	}
	list_add_tail(&region->list, &next->list);
	return 0;
}

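/*
 * Send a PROBE request for this endpoint and parse the properties returned by
 * the device. Currently only reserved memory regions are reported.
 */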
static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
{
	int ret;
	u16 type, len;
	size_t cur = 0;
	size_t probe_len;
	struct virtio_iommu_req_probe *probe;
	struct virtio_iommu_probe_property *prop;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);

	if (!fwspec->num_ids)
		return -EINVAL;

	probe_len = sizeof(*probe) + viommu->probe_size +
		    sizeof(struct virtio_iommu_req_tail);
	probe = kzalloc(probe_len, GFP_KERNEL);
	if (!probe)
		return -ENOMEM;

	probe->head.type = VIRTIO_IOMMU_T_PROBE;
	/*
	 * For now, assume that properties of an endpoint that outputs multiple
	 * IDs are consistent. Only probe the first one.
	 */
	probe->endpoint = cpu_to_le32(fwspec->ids[0]);

	ret = viommu_send_req_sync(viommu, probe, probe_len);
	if (ret)
		goto out_free;

	prop = (void *)probe->properties;
	type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;

	while (type != VIRTIO_IOMMU_PROBE_T_NONE &&
	       cur < viommu->probe_size) {
		len = le16_to_cpu(prop->length) + sizeof(*prop);

		switch (type) {
		case VIRTIO_IOMMU_PROBE_T_RESV_MEM:
			ret = viommu_add_resv_mem(vdev, (void *)prop, len);
			break;
		default:
			dev_err(dev, "unknown viommu prop 0x%x\n", type);
		}

		if (ret)
			dev_err(dev, "failed to parse viommu prop 0x%x\n", type);

		cur += len;
		if (cur >= viommu->probe_size)
			break;

		prop = (void *)probe->properties + cur;
		type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;
	}

out_free:
	kfree(probe);
	return ret;
}

static int viommu_fault_handler(struct viommu_dev *viommu,
				struct virtio_iommu_fault *fault)
{
	char *reason_str;

	u8 reason = fault->reason;
	u32 flags = le32_to_cpu(fault->flags);
	u32 endpoint = le32_to_cpu(fault->endpoint);
	u64 address = le64_to_cpu(fault->address);

	switch (reason) {
	case VIRTIO_IOMMU_FAULT_R_DOMAIN:
		reason_str = "domain";
		break;
	case VIRTIO_IOMMU_FAULT_R_MAPPING:
		reason_str = "page";
		break;
	case VIRTIO_IOMMU_FAULT_R_UNKNOWN:
	default:
		reason_str = "unknown";
		break;
	}

	/* TODO: find EP by ID and report_iommu_fault */
	if (flags & VIRTIO_IOMMU_FAULT_F_ADDRESS)
		dev_err_ratelimited(viommu->dev, "%s fault from EP %u at %#llx [%s%s%s]\n",
				    reason_str, endpoint, address,
				    flags & VIRTIO_IOMMU_FAULT_F_READ ? "R" : "",
				    flags & VIRTIO_IOMMU_FAULT_F_WRITE ? "W" : "",
				    flags & VIRTIO_IOMMU_FAULT_F_EXEC ? "X" : "");
	else
		dev_err_ratelimited(viommu->dev, "%s fault from EP %u\n",
				    reason_str, endpoint);
	return 0;
}

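/* Event virtqueue callback: report faults and return the event buffers to the queue for reuse */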
static void viommu_event_handler(struct virtqueue *vq)
{
	int ret;
	unsigned int len;
	struct scatterlist sg[1];
	struct viommu_event *evt;
	struct viommu_dev *viommu = vq->vdev->priv;

	while ((evt = virtqueue_get_buf(vq, &len)) != NULL) {
		if (len > sizeof(*evt)) {
			dev_err(viommu->dev,
				"invalid event buffer (len %u != %zu)\n",
				len, sizeof(*evt));
		} else if (!(evt->head & VIOMMU_FAULT_RESV_MASK)) {
			viommu_fault_handler(viommu, &evt->fault);
		}

		sg_init_one(sg, evt, sizeof(*evt));
		ret = virtqueue_add_inbuf(vq, sg, 1, evt, GFP_ATOMIC);
		if (ret)
			dev_err(viommu->dev, "could not add event buffer\n");
	}

	virtqueue_kick(vq);
}

/* IOMMU API */

static struct iommu_domain *viommu_domain_alloc_paging(struct device *dev)
{
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
	struct viommu_dev *viommu = vdev->viommu;
	unsigned long viommu_page_size;
	struct viommu_domain *vdomain;
	int ret;

	viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
	if (viommu_page_size > PAGE_SIZE) {
		dev_err(vdev->dev,
			"granule 0x%lx larger than system page size 0x%lx\n",
			viommu_page_size, PAGE_SIZE);
		return ERR_PTR(-ENODEV);
	}

	vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
	if (!vdomain)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&vdomain->mappings_lock);
	vdomain->mappings = RB_ROOT_CACHED;

	ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
			      viommu->last_domain, GFP_KERNEL);
	if (ret < 0) {
		kfree(vdomain);
		return ERR_PTR(ret);
	}

	vdomain->id = (unsigned int)ret;

	vdomain->domain.pgsize_bitmap = viommu->pgsize_bitmap;
	vdomain->domain.geometry = viommu->geometry;

	vdomain->map_flags = viommu->map_flags;
	vdomain->viommu = viommu;

	return &vdomain->domain;
}

static void viommu_domain_free(struct iommu_domain *domain)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	/* Free all remaining mappings */
	viommu_del_mappings(vdomain, 0, ULLONG_MAX);

	if (vdomain->viommu)
		ida_free(&vdomain->viommu->domain_ids, vdomain->id);

	kfree(vdomain);
}

static struct iommu_domain *viommu_domain_alloc_identity(struct device *dev)
{
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
	struct iommu_domain *domain;
	int ret;

	if (virtio_has_feature(vdev->viommu->vdev,
			       VIRTIO_IOMMU_F_BYPASS_CONFIG))
		return &viommu_identity_domain.domain;

	domain = viommu_domain_alloc_paging(dev);
	if (IS_ERR(domain))
		return domain;

	ret = viommu_domain_map_identity(vdev, to_viommu_domain(domain));
	if (ret) {
		viommu_domain_free(domain);
		return ERR_PTR(ret);
	}
	return domain;
}

static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev,
			     struct iommu_domain *old)
{
	int ret = 0;
	struct virtio_iommu_req_attach req;
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	if (vdomain->viommu != vdev->viommu)
		return -EINVAL;

	/*
	 * In the virtio-iommu device, when attaching the endpoint to a new
	 * domain, it is detached from the old one and, if as a result the
	 * old domain isn't attached to any endpoint, all mappings are removed
	 * from the old domain and it is freed.
	 *
	 * In the driver the old domain still exists, and its mappings will be
	 * recreated if it gets reattached to an endpoint. Otherwise it will be
	 * freed explicitly.
	 *
	 * vdev->vdomain is protected by group->mutex
	 */
	if (vdev->vdomain)
		vdev->vdomain->nr_endpoints--;

	req = (struct virtio_iommu_req_attach) {
		.head.type = VIRTIO_IOMMU_T_ATTACH,
		.domain = cpu_to_le32(vdomain->id),
	};

	ret = viommu_send_attach_req(vdomain->viommu, dev, &req);
	if (ret)
		return ret;

	if (!vdomain->nr_endpoints) {
		/*
		 * This endpoint is the first to be attached to the domain.
		 * Replay existing mappings (e.g. SW MSI).
		 */
		ret = viommu_replay_mappings(vdomain);
		if (ret)
			return ret;
	}

	vdomain->nr_endpoints++;
	vdev->vdomain = vdomain;

	return 0;
}

static int viommu_attach_identity_domain(struct iommu_domain *domain,
					 struct device *dev,
					 struct iommu_domain *old)
{
	int ret = 0;
	struct virtio_iommu_req_attach req;
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	req = (struct virtio_iommu_req_attach) {
		.head.type = VIRTIO_IOMMU_T_ATTACH,
		.domain = cpu_to_le32(vdev->viommu->identity_domain_id),
		.flags = cpu_to_le32(VIRTIO_IOMMU_ATTACH_F_BYPASS),
	};

	ret = viommu_send_attach_req(vdev->viommu, dev, &req);
	if (ret)
		return ret;

	if (vdev->vdomain)
		vdev->vdomain->nr_endpoints--;
	vdomain->nr_endpoints++;
	vdev->vdomain = vdomain;
	return 0;
}

static struct viommu_domain viommu_identity_domain = {
	.domain = {
		.type = IOMMU_DOMAIN_IDENTITY,
		.ops = &(const struct iommu_domain_ops) {
			.attach_dev = viommu_attach_identity_domain,
		},
	},
};

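/* Detach the endpoint from its current domain, sending one DETACH request per endpoint ID */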
static void viommu_detach_dev(struct viommu_endpoint *vdev)
{
	int i;
	struct virtio_iommu_req_detach req;
	struct viommu_domain *vdomain = vdev->vdomain;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(vdev->dev);

	if (!vdomain)
		return;

	req = (struct virtio_iommu_req_detach) {
		.head.type = VIRTIO_IOMMU_T_DETACH,
		.domain = cpu_to_le32(vdomain->id),
	};

	for (i = 0; i < fwspec->num_ids; i++) {
		req.endpoint = cpu_to_le32(fwspec->ids[i]);
		WARN_ON(viommu_send_req_sync(vdev->viommu, &req, sizeof(req)));
	}
	vdomain->nr_endpoints--;
	vdev->vdomain = NULL;
}

static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t pgsize, size_t pgcount,
			    int prot, gfp_t gfp, size_t *mapped)
{
	int ret;
	u32 flags;
	size_t size = pgsize * pgcount;
	u64 end = iova + size - 1;
	struct virtio_iommu_req_map map;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	flags = (prot & IOMMU_READ ? VIRTIO_IOMMU_MAP_F_READ : 0) |
		(prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
		(prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);

	if (flags & ~vdomain->map_flags)
		return -EINVAL;

	ret = viommu_add_mapping(vdomain, iova, end, paddr, flags);
	if (ret)
		return ret;

	if (vdomain->nr_endpoints) {
		map = (struct virtio_iommu_req_map) {
			.head.type = VIRTIO_IOMMU_T_MAP,
			.domain = cpu_to_le32(vdomain->id),
			.virt_start = cpu_to_le64(iova),
			.phys_start = cpu_to_le64(paddr),
			.virt_end = cpu_to_le64(end),
			.flags = cpu_to_le32(flags),
		};

		ret = viommu_add_req(vdomain->viommu, &map, sizeof(map));
		if (ret) {
			viommu_del_mappings(vdomain, iova, end);
			return ret;
		}
	}
	if (mapped)
		*mapped = size;

	return 0;
}

static size_t viommu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
				 size_t pgsize, size_t pgcount,
				 struct iommu_iotlb_gather *gather)
{
	int ret = 0;
	size_t unmapped;
	struct virtio_iommu_req_unmap unmap;
	struct viommu_domain *vdomain = to_viommu_domain(domain);
	size_t size = pgsize * pgcount;

	unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1);
	if (unmapped < size)
		return 0;

	/* Device already removed all mappings after detach. */
	if (!vdomain->nr_endpoints)
		return unmapped;

	unmap = (struct virtio_iommu_req_unmap) {
		.head.type = VIRTIO_IOMMU_T_UNMAP,
		.domain = cpu_to_le32(vdomain->id),
		.virt_start = cpu_to_le64(iova),
		.virt_end = cpu_to_le64(iova + unmapped - 1),
	};

	ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap));
	return ret ? 0 : unmapped;
}

static phys_addr_t viommu_iova_to_phys(struct iommu_domain *domain,
				       dma_addr_t iova)
{
	u64 paddr = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, iova, iova);
	if (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		paddr = mapping->paddr + (iova - mapping->iova.start);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return paddr;
}

static void viommu_iotlb_sync(struct iommu_domain *domain,
			      struct iommu_iotlb_gather *gather)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	viommu_sync_req(vdomain->viommu);
}

static int viommu_iotlb_sync_map(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	/*
	 * May be called before the viommu is initialized, e.g. while creating
	 * direct mappings.
	 */
	if (!vdomain->nr_endpoints)
		return 0;
	return viommu_sync_req(vdomain->viommu);
}

static void viommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	/*
	 * May be called before the viommu is initialized, e.g. while creating
	 * direct mappings.
	 */
	if (!vdomain->nr_endpoints)
		return;
	viommu_sync_req(vdomain->viommu);
}

static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_resv_region *entry, *new_entry, *msi = NULL;
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	list_for_each_entry(entry, &vdev->resv_regions, list) {
		if (entry->type == IOMMU_RESV_MSI)
			msi = entry;

		new_entry = kmemdup(entry, sizeof(*entry), GFP_KERNEL);
		if (!new_entry)
			return;
		list_add_tail(&new_entry->list, head);
	}

	/*
	 * If the device didn't register any bypass MSI window, add a
	 * software-mapped region.
	 */
	if (!msi) {
		msi = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					      prot, IOMMU_RESV_SW_MSI,
					      GFP_KERNEL);
		if (!msi)
			return;

		list_add_tail(&msi->list, head);
	}

	iommu_dma_get_resv_regions(dev, head);
}

static const struct bus_type *virtio_bus_type;

static int viommu_match_node(struct device *dev, const void *data)
{
	return device_match_fwnode(dev->parent, data);
}

static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = bus_find_device(virtio_bus_type, NULL, fwnode,
					     viommu_match_node);

	put_device(dev);

	return dev ? dev_to_virtio(dev)->priv : NULL;
}

static struct iommu_device *viommu_probe_device(struct device *dev)
{
	int ret;
	struct viommu_endpoint *vdev;
	struct viommu_dev *viommu = NULL;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode);
	if (!viommu)
		return ERR_PTR(-ENODEV);

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev)
		return ERR_PTR(-ENOMEM);

	vdev->dev = dev;
	vdev->viommu = viommu;
	INIT_LIST_HEAD(&vdev->resv_regions);
	dev_iommu_priv_set(dev, vdev);

	if (viommu->probe_size) {
		/* Get additional information for this endpoint */
		ret = viommu_probe_endpoint(viommu, dev);
		if (ret)
			goto err_free_dev;
	}

	return &viommu->iommu;

err_free_dev:
	iommu_put_resv_regions(dev, &vdev->resv_regions);
	kfree(vdev);

	return ERR_PTR(ret);
}

static void viommu_release_device(struct device *dev)
{
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);

	viommu_detach_dev(vdev);
	iommu_put_resv_regions(dev, &vdev->resv_regions);
	kfree(vdev);
}

static struct iommu_group *viommu_device_group(struct device *dev)
{
	if (dev_is_pci(dev))
		return pci_device_group(dev);
	else
		return generic_device_group(dev);
}

static int viommu_of_xlate(struct device *dev,
			   const struct of_phandle_args *args)
{
	return iommu_fwspec_add_ids(dev, args->args, 1);
}

static bool viommu_capable(struct device *dev, enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_DEFERRED_FLUSH:
		return true;
	default:
		return false;
	}
}

static const struct iommu_ops viommu_ops = {
	.capable = viommu_capable,
	.domain_alloc_identity = viommu_domain_alloc_identity,
	.domain_alloc_paging = viommu_domain_alloc_paging,
	.probe_device = viommu_probe_device,
	.release_device = viommu_release_device,
	.device_group = viommu_device_group,
	.get_resv_regions = viommu_get_resv_regions,
	.of_xlate = viommu_of_xlate,
	.owner = THIS_MODULE,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = viommu_attach_dev,
		.map_pages = viommu_map_pages,
		.unmap_pages = viommu_unmap_pages,
		.iova_to_phys = viommu_iova_to_phys,
		.flush_iotlb_all = viommu_flush_iotlb_all,
		.iotlb_sync = viommu_iotlb_sync,
		.iotlb_sync_map = viommu_iotlb_sync_map,
		.free = viommu_domain_free,
	}
};

static int viommu_init_vqs(struct viommu_dev *viommu)
{
	struct virtio_device *vdev = dev_to_virtio(viommu->dev);
	struct virtqueue_info vqs_info[] = {
		{ "request" },
		{ "event", viommu_event_handler },
	};

	return virtio_find_vqs(vdev, VIOMMU_NR_VQS, viommu->vqs,
			       vqs_info, NULL);
}

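/* Pre-fill the event virtqueue with buffers for the device to report faults into */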
static int viommu_fill_evtq(struct viommu_dev *viommu)
{
	int i, ret;
	struct scatterlist sg[1];
	struct viommu_event *evts;
	struct virtqueue *vq = viommu->vqs[VIOMMU_EVENT_VQ];
	size_t nr_evts = vq->num_free;

	viommu->evts = evts = devm_kmalloc_array(viommu->dev, nr_evts,
						 sizeof(*evts), GFP_KERNEL);
	if (!evts)
		return -ENOMEM;

	for (i = 0; i < nr_evts; i++) {
		sg_init_one(sg, &evts[i], sizeof(*evts));
		ret = virtqueue_add_inbuf(vq, sg, 1, &evts[i], GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}

static int viommu_probe(struct virtio_device *vdev)
{
	struct device *parent_dev = vdev->dev.parent;
	struct viommu_dev *viommu = NULL;
	struct device *dev = &vdev->dev;
	u64 input_start = 0;
	u64 input_end = -1UL;
	int ret;

	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
	    !virtio_has_feature(vdev, VIRTIO_IOMMU_F_MAP_UNMAP))
		return -ENODEV;

	viommu = devm_kzalloc(dev, sizeof(*viommu), GFP_KERNEL);
	if (!viommu)
		return -ENOMEM;

	/* Borrow this for easy lookups later */
	virtio_bus_type = dev->bus;

	spin_lock_init(&viommu->request_lock);
	ida_init(&viommu->domain_ids);
	viommu->dev = dev;
	viommu->vdev = vdev;
	INIT_LIST_HEAD(&viommu->requests);

	ret = viommu_init_vqs(viommu);
	if (ret)
		return ret;

	virtio_cread_le(vdev, struct virtio_iommu_config, page_size_mask,
			&viommu->pgsize_bitmap);

	if (!viommu->pgsize_bitmap) {
		ret = -EINVAL;
		goto err_free_vqs;
	}

	viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
	viommu->last_domain = ~0U;

	/* Optional features */
	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
				struct virtio_iommu_config, input_range.start,
				&input_start);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
				struct virtio_iommu_config, input_range.end,
				&input_end);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
				struct virtio_iommu_config, domain_range.start,
				&viommu->first_domain);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
				struct virtio_iommu_config, domain_range.end,
				&viommu->last_domain);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_PROBE,
				struct virtio_iommu_config, probe_size,
				&viommu->probe_size);

	viommu->geometry = (struct iommu_domain_geometry) {
		.aperture_start = input_start,
		.aperture_end = input_end,
		.force_aperture = true,
	};

	if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
		viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;

	/* Reserve an ID to use as the bypass domain */
	if (virtio_has_feature(viommu->vdev, VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
		viommu->identity_domain_id = viommu->first_domain;
		viommu->first_domain++;
	}

	virtio_device_ready(vdev);

	/* Populate the event queue with buffers */
	ret = viommu_fill_evtq(viommu);
	if (ret)
		goto err_free_vqs;

	ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s",
				     virtio_bus_name(vdev));
	if (ret)
		goto err_free_vqs;

	vdev->priv = viommu;

	iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev);

	dev_info(dev, "input address: %u bits\n",
		 order_base_2(viommu->geometry.aperture_end));
	dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap);

	return 0;

err_free_vqs:
	vdev->config->del_vqs(vdev);

	return ret;
}

static void viommu_remove(struct virtio_device *vdev)
{
	struct viommu_dev *viommu = vdev->priv;

	iommu_device_sysfs_remove(&viommu->iommu);
	iommu_device_unregister(&viommu->iommu);

	/* Stop all virtqueues */
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);

	dev_info(&vdev->dev, "device removed\n");
}

static void viommu_config_changed(struct virtio_device *vdev)
{
	dev_warn(&vdev->dev, "config changed\n");
}

static unsigned int features[] = {
	VIRTIO_IOMMU_F_MAP_UNMAP,
	VIRTIO_IOMMU_F_INPUT_RANGE,
	VIRTIO_IOMMU_F_DOMAIN_RANGE,
	VIRTIO_IOMMU_F_PROBE,
	VIRTIO_IOMMU_F_MMIO,
	VIRTIO_IOMMU_F_BYPASS_CONFIG,
};

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_IOMMU, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
MODULE_DEVICE_TABLE(virtio, id_table);

static struct virtio_driver virtio_iommu_drv = {
	.driver.name = KBUILD_MODNAME,
	.id_table = id_table,
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.probe = viommu_probe,
	.remove = viommu_remove,
	.config_changed = viommu_config_changed,
};

module_virtio_driver(virtio_iommu_drv);

MODULE_DESCRIPTION("Virtio IOMMU driver");
MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
MODULE_LICENSE("GPL v2");