// SPDX-License-Identifier: GPL-2.0
/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 */

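/*
 * Coalesced MMIO batches guest writes to registered I/O zones into a
 * ring buffer shared with userspace: instead of exiting to userspace
 * on every write, KVM logs the write and lets the VMM drain the ring
 * on its next exit.
 */
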
#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

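/*
 * Note: also used by the unregister path below, where the range to
 * test is the (addr, size) pair supplied by userspace.
 */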
static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	/*
	 * Is the access in a batchable area, i.e. is (addr, len) fully
	 * included in (zone->addr, zone->size)?  Reject negative lengths
	 * and ranges where addr + len wraps around.
	 */
	if (len < 0)
		return 0;
	if (addr + len < addr)
		return 0;
	if (addr < dev->zone.addr)
		return 0;
	if (addr + len > dev->zone.addr + dev->zone.size)
		return 0;
	return 1;
}

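/*
 * A write that lands in the zone is appended to the shared ring below.
 * Returning -EOPNOTSUPP tells the I/O bus code that this device did not
 * claim the access, so KVM falls back to the regular MMIO exit path.
 */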
static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
				struct kvm_io_device *this, gpa_t addr,
				int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
	__u32 insert;

	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->kvm->ring_lock);

	/*
	 * last is the index of the entry to fill.  Verify userspace hasn't
	 * set last to be out of range, and that there is room in the ring.
	 * Leave one entry free in the ring so that userspace can differentiate
	 * between an empty ring and a full ring.
	 */
	insert = READ_ONCE(ring->last);
	if (insert >= KVM_COALESCED_MMIO_MAX ||
	    (insert + 1) % KVM_COALESCED_MMIO_MAX == READ_ONCE(ring->first)) {
		spin_unlock(&dev->kvm->ring_lock);
		return -EOPNOTSUPP;
	}

	/* Copy the data into the first free entry of the ring. */
	ring->coalesced_mmio[insert].phys_addr = addr;
	ring->coalesced_mmio[insert].len = len;
	memcpy(ring->coalesced_mmio[insert].data, val, len);
	ring->coalesced_mmio[insert].pio = dev->zone.pio;

	/* Publish the entry to userspace only after it is fully written. */
	smp_wmb();
	ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->kvm->ring_lock);
	return 0;
}
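
/*
 * For reference, a minimal sketch of how a VMM might drain the ring
 * after returning from KVM_RUN.  The ring layout is ABI (see
 * <linux/kvm.h>); process_mmio() is a hypothetical userspace helper:
 *
 *	while (ring->first != ring->last) {
 *		struct kvm_coalesced_mmio *ent =
 *			&ring->coalesced_mmio[ring->first];
 *		process_mmio(ent->pio, ent->phys_addr, ent->data, ent->len);
 *		smp_wmb();	// finish with the entry before freeing the slot
 *		ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
 *	}
 */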

/*
 * Called by the I/O bus code when the device is destroyed, either on
 * unregistration or at VM teardown.
 */
static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	list_del(&dev->list);

	kfree(dev);
}

/*
 * Only writes can be coalesced; there is no .read handler, so reads to
 * a registered zone take the normal I/O exit path.
 */
static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};

int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!page)
		return -ENOMEM;

	kvm->coalesced_mmio_ring = page_address(page);

	/*
	 * We're using this spinlock to sync access to the coalesced ring.
	 * The list doesn't need its own lock since device registration and
	 * unregistration should only happen when kvm->slots_lock is held.
	 */
	spin_lock_init(&kvm->ring_lock);
	INIT_LIST_HEAD(&kvm->coalesced_zones);

	return 0;
}

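/*
 * The ring page allocated above is exposed to userspace via mmap() on
 * the vcpu fd, e.g. (illustrative, error handling omitted):
 *
 *	ring = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		    vcpu_fd, KVM_COALESCED_MMIO_PAGE_OFFSET * PAGE_SIZE);
 */
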
void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}

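/*
 * Reached via the KVM_REGISTER_COALESCED_MMIO vm ioctl, e.g.
 * (illustrative values):
 *
 *	struct kvm_coalesced_mmio_zone zone = {
 *		.addr = 0xfe000000,	// hypothetical framebuffer base
 *		.size = 0x100000,
 *		.pio  = 0,
 *	};
 *	ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
 */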
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	int ret;
	struct kvm_coalesced_mmio_dev *dev;

	if (zone->pio != 1 && zone->pio != 0)
		return -EINVAL;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT);
	if (!dev)
		return -ENOMEM;

	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	dev->zone = *zone;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm,
				zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS,
				zone->addr, zone->size, &dev->dev);
	if (ret < 0)
		goto out_free_dev;
	list_add_tail(&dev->list, &kvm->coalesced_zones);
	mutex_unlock(&kvm->slots_lock);

	return 0;

out_free_dev:
	mutex_unlock(&kvm->slots_lock);
	kfree(dev);

	return ret;
}
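
/*
 * Reached via the KVM_UNREGISTER_COALESCED_MMIO vm ioctl.  This removes
 * every registered zone (of matching pio type) that fully contains the
 * (addr, size) range supplied by userspace.
 */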
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev, *tmp;
	int r;

	if (zone->pio != 1 && zone->pio != 0)
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list) {
		if (zone->pio == dev->zone.pio &&
		    coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
			r = kvm_io_bus_unregister_dev(kvm,
				zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
			/*
			 * On failure, unregister destroys all devices on the
			 * bus, including the target device.  There's no need
			 * to restart the walk as there aren't any zones left.
			 */
			if (r)
				break;
		}
	}

	mutex_unlock(&kvm->slots_lock);

	/*
	 * Ignore the result of kvm_io_bus_unregister_dev(); from userspace's
	 * perspective, the coalesced MMIO zone is most definitely unregistered.
	 */
	return 0;
}