xref: /linux/virt/kvm/coalesced_mmio.c (revision f2ee442115c9b6219083c019939a9cc0c9abb2f8)
/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 *  Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	/* Is the access in a batchable area, i.e. is (addr, len)
	 * fully contained in (zone->addr, zone->size)?
	 */
	return (dev->zone.addr <= addr &&
		addr + len <= dev->zone.addr + dev->zone.size);
}

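/*
 * A worked example of the containment check above (illustrative
 * numbers only): for a zone at 0x1000 of size 0x100, a 4-byte write
 * to 0x10fc is in range, since 0x1000 <= 0x10fc and
 * 0x10fc + 4 == 0x1100 <= 0x1000 + 0x100.  A 4-byte write to 0x10fe
 * is rejected, since 0x10fe + 4 == 0x1102 crosses the end of the
 * zone.
 */
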
static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
{
	struct kvm_coalesced_mmio_ring *ring;
	unsigned avail;

	/* Are we able to batch it?
	 *
	 * ring->last is the first free entry; make sure it does not
	 * catch up with ring->first, the oldest used entry.  One entry
	 * is always left unused so that an empty ring (first == last)
	 * can be distinguished from a full one.
	 */
	ring = dev->kvm->coalesced_mmio_ring;
	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
	if (avail == 0) {
		/* full */
		return 0;
	}

	return 1;
}

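/*
 * A sketch of the free-slot arithmetic above, assuming
 * KVM_COALESCED_MMIO_MAX is 64 (the real value is derived in the
 * UAPI headers; the exact number does not matter for the logic):
 *
 *	first == 3,  last == 3   ->  (3 - 3 - 1) % 64 == 63  (empty)
 *	first == 3,  last == 2   ->  (3 - 2 - 1) % 64 == 0   (full)
 *	first == 0,  last == 63  ->  (0 - 63 - 1) % 64 == 0  (full)
 *
 * 'avail' is unsigned so the subtraction wraps around instead of
 * going negative when last has run ahead of first.
 */
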
static int coalesced_mmio_write(struct kvm_io_device *this,
				gpa_t addr, int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->kvm->ring_lock);

	if (!coalesced_mmio_has_room(dev)) {
		spin_unlock(&dev->kvm->ring_lock);
		return -EOPNOTSUPP;
	}

	/* copy data into the first free entry of the ring */
	ring->coalesced_mmio[ring->last].phys_addr = addr;
	ring->coalesced_mmio[ring->last].len = len;
	memcpy(ring->coalesced_mmio[ring->last].data, val, len);

	/* make the entry visible to userspace before advancing last */
	smp_wmb();
	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->kvm->ring_lock);
	return 0;
}

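/*
 * The consumer side of this ring lives in userspace.  A minimal
 * sketch of how a VMM might drain it after returning from KVM_RUN
 * (hypothetical helper; handle_mmio() stands in for the VMM's real
 * MMIO dispatch):
 *
 *	void drain_coalesced_ring(struct kvm_coalesced_mmio_ring *ring)
 *	{
 *		while (ring->first != ring->last) {
 *			struct kvm_coalesced_mmio *e =
 *				&ring->coalesced_mmio[ring->first];
 *
 *			handle_mmio(e->phys_addr, e->data, e->len);
 *			ring->first = (ring->first + 1) %
 *				      KVM_COALESCED_MMIO_MAX;
 *		}
 *	}
 *
 * Reading each entry before advancing 'first' pairs with the
 * smp_wmb() on the producer side above.
 */
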
static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	list_del(&dev->list);

	kfree(dev);
}

static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};

int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct page *page;
	int ret;

	ret = -ENOMEM;
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		goto out_err;

	ret = 0;
	kvm->coalesced_mmio_ring = page_address(page);

	/*
	 * We're using this spinlock to sync access to the coalesced ring.
	 * The list doesn't need its own lock since device registration and
	 * unregistration should only happen when kvm->slots_lock is held.
	 */
	spin_lock_init(&kvm->ring_lock);
	INIT_LIST_HEAD(&kvm->coalesced_zones);

out_err:
	return ret;
}

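/*
 * Userspace reaches the page allocated above through the vcpu mmap
 * area.  A sketch of the lookup, assuming KVM_CHECK_EXTENSION on
 * KVM_CAP_COALESCED_MMIO returns the page offset of the ring and
 * 'run' is the result of mmap()ing the vcpu fd:
 *
 *	int off = ioctl(kvm_fd, KVM_CHECK_EXTENSION,
 *			KVM_CAP_COALESCED_MMIO);
 *	struct kvm_coalesced_mmio_ring *ring =
 *		(void *)((char *)run + off * getpagesize());
 */
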
void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}

int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	int ret;
	struct kvm_coalesced_mmio_dev *dev;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	dev->zone = *zone;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, zone->addr,
				      zone->size, &dev->dev);
	if (ret < 0)
		goto out_free_dev;
	list_add_tail(&dev->list, &kvm->coalesced_zones);
	mutex_unlock(&kvm->slots_lock);

	return 0;

out_free_dev:
	mutex_unlock(&kvm->slots_lock);
	kfree(dev);
	return ret;
}

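/*
 * A sketch of the matching userspace call (the fd and the zone
 * values are made up for illustration):
 *
 *	struct kvm_coalesced_mmio_zone zone = {
 *		.addr = 0xfebf0000,
 *		.size = 0x1000,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone) < 0)
 *		err(1, "KVM_REGISTER_COALESCED_MMIO");
 *
 * Guest writes inside that range are then queued in the ring instead
 * of triggering an immediate exit to userspace.
 */
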
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev, *tmp;

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
		if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
			kvm_iodevice_destructor(&dev->dev);
		}

	mutex_unlock(&kvm->slots_lock);

	return 0;
}
181