xref: /linux/virt/kvm/coalesced_mmio.c (revision 60e13231561b3a4c5269bfa1ef6c0569ad6f28ec)
/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 *  Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

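/*
 * Decide whether a write can be coalesced: there must be room in the
 * ring and (addr, len) must fall entirely inside a registered zone.
 * Returns 1 if the access can be batched, 0 otherwise.
 */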
static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	struct kvm_coalesced_mmio_zone *zone;
	struct kvm_coalesced_mmio_ring *ring;
	unsigned avail;
	int i;

	/* Can we batch this access? */

	/*
	 * ring->last is the first free entry; the ring is full when it
	 * would catch up with ring->first, so one entry always stays
	 * unused.  Because this check runs without dev->lock held,
	 * require KVM_MAX_VCPUS free entries: even if every other vcpu
	 * inserts a write before we take the lock, the ring cannot
	 * overflow.
	 */
	ring = dev->kvm->coalesced_mmio_ring;
	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
	if (avail < KVM_MAX_VCPUS) {
		/* full */
		return 0;
	}

	/* Is it in a batchable area? */

	for (i = 0; i < dev->nb_zones; i++) {
		zone = &dev->zone[i];

		/* (addr, len) must be fully included in
		 * (zone->addr, zone->size)
		 */

		if (zone->addr <= addr &&
		    addr + len <= zone->addr + zone->size)
			return 1;
	}
	return 0;
}

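/*
 * Called for every vcpu MMIO write on the KVM_MMIO_BUS.  If the access
 * is batchable, append it to the shared ring and return 0 so the guest
 * resumes without a userspace exit; otherwise return -EOPNOTSUPP so the
 * write is handled as a normal MMIO exit.
 */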
static int coalesced_mmio_write(struct kvm_io_device *this,
				gpa_t addr, int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->lock);

	/* copy data into the first free entry of the ring */

	ring->coalesced_mmio[ring->last].phys_addr = addr;
	ring->coalesced_mmio[ring->last].len = len;
	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
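	/*
	 * Order the entry against the index update: userspace reads the
	 * ring lock-free, so the data must be visible before the new
	 * ring->last value that publishes it.
	 */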
	smp_wmb();
	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->lock);
	return 0;
}

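/*
 * Destructor, invoked via kvm_io_bus_destroy() when the VM goes away;
 * frees the device that kvm_coalesced_mmio_init() allocated.
 */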
static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	kfree(dev);
}

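/*
 * No ->read handler: only writes can be coalesced, reads always take
 * the regular MMIO exit path.
 */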
static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};

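/*
 * Set up coalesced MMIO for a VM: allocate one zeroed page for the ring
 * (userspace maps it at KVM_COALESCED_MMIO_PAGE_OFFSET of the vcpu fd)
 * and register the device on the MMIO bus.
 *
 * Consumer-side sketch (hypothetical helper names), run by userspace
 * after returning from KVM_RUN:
 *
 *	while (ring->first != ring->last) {
 *		struct kvm_coalesced_mmio *ent =
 *			&ring->coalesced_mmio[ring->first];
 *		replay_mmio_write(ent->phys_addr, ent->data, ent->len);
 *		ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
 *	}
 */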
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct kvm_coalesced_mmio_dev *dev;
	struct page *page;
	int ret;

	ret = -ENOMEM;
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		goto out_err;
	kvm->coalesced_mmio_ring = page_address(page);

	ret = -ENOMEM;
	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		goto out_free_page;
	spin_lock_init(&dev->lock);
	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	kvm->coalesced_mmio_dev = dev;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0)
		goto out_free_dev;

	return ret;

out_free_dev:
	kvm->coalesced_mmio_dev = NULL;
	kfree(dev);
out_free_page:
	kvm->coalesced_mmio_ring = NULL;
	__free_page(page);
out_err:
	return ret;
}

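/*
 * Free the ring page.  The device itself is released by its destructor
 * when the MMIO bus is torn down.
 */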
void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}

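/*
 * KVM_REGISTER_COALESCED_MMIO: add a guest physical range whose writes
 * may be coalesced.
 *
 * Illustrative userspace call (hypothetical address values):
 *
 *	struct kvm_coalesced_mmio_zone zone = {
 *		.addr = 0xe0000000,	// example MMIO base
 *		.size = 0x1000,
 *	};
 *	ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
 */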
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;

	if (dev == NULL)
		return -ENXIO;

	mutex_lock(&kvm->slots_lock);
	if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
		mutex_unlock(&kvm->slots_lock);
		return -ENOBUFS;
	}

	dev->zone[dev->nb_zones] = *zone;
	dev->nb_zones++;

	mutex_unlock(&kvm->slots_lock);
	return 0;
}

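/*
 * KVM_UNREGISTER_COALESCED_MMIO: drop every zone that lies entirely
 * inside the given (zone->addr, zone->size) range.
 */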
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	int i;
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
	struct kvm_coalesced_mmio_zone *z;

	if (dev == NULL)
		return -ENXIO;

	mutex_lock(&kvm->slots_lock);

	i = dev->nb_zones;
	while (i) {
		z = &dev->zone[i - 1];

		/* Unregister all zones fully included in
		 * (zone->addr, zone->size).  Removal fills the hole with
		 * the last zone; walking the array downwards guarantees
		 * the moved zone has already been examined.
		 */

		if (zone->addr <= z->addr &&
		    z->addr + z->size <= zone->addr + zone->size) {
			dev->nb_zones--;
			*z = dev->zone[dev->nb_zones];
		}
		i--;
	}

	mutex_unlock(&kvm->slots_lock);

	return 0;
}