xref: /linux/drivers/virtio/virtio_balloon.c (revision 988addf82e4c03739375279de73929580a2d4a6a)
/* Virtio balloon implementation, inspired by Dor Laor and Marcelo
 * Tosatti's implementations.
 *
 *  Copyright 2008 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
//#define DEBUG
#include <linux/virtio.h>
#include <linux/virtio_balloon.h>
#include <linux/swap.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/delay.h>

struct virtio_balloon
{
	struct virtio_device *vdev;
	struct virtqueue *inflate_vq, *deflate_vq, *stats_vq;

	/* Where the ballooning thread waits for config to change. */
	wait_queue_head_t config_change;

	/* The thread servicing the balloon. */
	struct task_struct *thread;

	/* Waiting for host to ack the pages we released. */
	struct completion acked;

	/* Do we have to tell Host *before* we reuse pages? */
	bool tell_host_first;

	/* The pages we've told the Host we're not using. */
	unsigned int num_pages;
	struct list_head pages;

	/* The array of pfns we tell the Host about. */
	unsigned int num_pfns;
	u32 pfns[256];

	/* Memory statistics */
	int need_stats_update;
	struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
};

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BALLOON, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static u32 page_to_balloon_pfn(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);

	BUILD_BUG_ON(PAGE_SHIFT < VIRTIO_BALLOON_PFN_SHIFT);
	/* Convert pfn from Linux page size to balloon page size. */
	return pfn >> (PAGE_SHIFT - VIRTIO_BALLOON_PFN_SHIFT);
}

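/* Virtqueue callback for both inflate and deflate: the host has consumed
 * our pfn buffer, so complete vb->acked and let tell_host() continue. */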
static void balloon_ack(struct virtqueue *vq)
{
	struct virtio_balloon *vb;
	unsigned int len;

	vb = vq->vq_ops->get_buf(vq, &len);
	if (vb)
		complete(&vb->acked);
}

static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
{
	struct scatterlist sg;

	sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);

	init_completion(&vb->acked);

	/* We should always be able to add one buffer to an empty queue. */
	if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0)
		BUG();
	vq->vq_ops->kick(vq);

	/* When host has read buffer, this completes via balloon_ack */
	wait_for_completion(&vb->acked);
}

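/* Inflate the balloon: allocate up to num pages, subtract them from
 * totalram_pages, queue them on vb->pages and hand their pfns to the
 * host via the inflate queue. */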
static void fill_balloon(struct virtio_balloon *vb, size_t num)
{
	/* We can only do one array worth at a time. */
	num = min(num, ARRAY_SIZE(vb->pfns));

	for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns++) {
		struct page *page = alloc_page(GFP_HIGHUSER | __GFP_NORETRY);
		if (!page) {
			if (printk_ratelimit())
				dev_printk(KERN_INFO, &vb->vdev->dev,
					   "Out of puff! Can't get %zu pages\n",
					   num);
			/* Sleep for at least 1/5 of a second before retry. */
			msleep(200);
			break;
		}
		vb->pfns[vb->num_pfns] = page_to_balloon_pfn(page);
		totalram_pages--;
		vb->num_pages++;
		list_add(&page->lru, &vb->pages);
	}

	/* Didn't get any?  Oh well. */
	if (vb->num_pfns == 0)
		return;

	tell_host(vb, vb->inflate_vq);
}

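/* Hand pages back to the page allocator and restore the totalram accounting. */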
static void release_pages_by_pfn(const u32 pfns[], unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		__free_page(pfn_to_page(pfns[i]));
		totalram_pages++;
	}
}

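/* Deflate the balloon: pull up to num pages off our list and return them to
 * the guest, notifying the host via the deflate queue.  Whether we tell the
 * host before or after freeing depends on VIRTIO_BALLOON_F_MUST_TELL_HOST. */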
static void leak_balloon(struct virtio_balloon *vb, size_t num)
{
	struct page *page;

	/* We can only do one array worth at a time. */
	num = min(num, ARRAY_SIZE(vb->pfns));

	for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns++) {
		page = list_first_entry(&vb->pages, struct page, lru);
		list_del(&page->lru);
		vb->pfns[vb->num_pfns] = page_to_balloon_pfn(page);
		vb->num_pages--;
	}

	if (vb->tell_host_first) {
		tell_host(vb, vb->deflate_vq);
		release_pages_by_pfn(vb->pfns, vb->num_pfns);
	} else {
		release_pages_by_pfn(vb->pfns, vb->num_pfns);
		tell_host(vb, vb->deflate_vq);
	}
}

static inline void update_stat(struct virtio_balloon *vb, int idx,
			       u16 tag, u64 val)
{
	BUG_ON(idx >= VIRTIO_BALLOON_S_NR);
	vb->stats[idx].tag = tag;
	vb->stats[idx].val = val;
}

#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)

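/* Refresh the stats array from all_vm_events() and si_meminfo(): swap traffic
 * and free/total memory are reported in bytes (via pages_to_bytes), major and
 * minor faults as raw event counts. */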
static void update_balloon_stats(struct virtio_balloon *vb)
{
	unsigned long events[NR_VM_EVENT_ITEMS];
	struct sysinfo i;
	int idx = 0;

	all_vm_events(events);
	si_meminfo(&i);

	update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
				pages_to_bytes(events[PSWPIN]));
	update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
				pages_to_bytes(events[PSWPOUT]));
	update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
	update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
	update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
				pages_to_bytes(i.freeram));
	update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
				pages_to_bytes(i.totalram));
}

/*
 * While most virtqueues communicate guest-initiated requests to the hypervisor,
 * the stats queue operates in reverse.  The driver initializes the virtqueue
 * with a single buffer.  From that point forward, all conversations consist of
 * a hypervisor request (a call to this function) which directs us to refill
 * the virtqueue with a fresh stats buffer.  Since stats collection can sleep,
 * we notify our kthread which does the actual work via stats_handle_request().
 */
static void stats_request(struct virtqueue *vq)
{
	struct virtio_balloon *vb;
	unsigned int len;

	vb = vq->vq_ops->get_buf(vq, &len);
	if (!vb)
		return;
	vb->need_stats_update = 1;
	wake_up(&vb->config_change);
}

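/* Runs in the balloon kthread: refresh the stats and hand the single,
 * recycled stats buffer back to the host. */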
static void stats_handle_request(struct virtio_balloon *vb)
{
	struct virtqueue *vq;
	struct scatterlist sg;

	vb->need_stats_update = 0;
	update_balloon_stats(vb);

	vq = vb->stats_vq;
	sg_init_one(&sg, vb->stats, sizeof(vb->stats));
	if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0)
		BUG();
	vq->vq_ops->kick(vq);
}

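/* Config-change interrupt: the host moved the target, so wake the kthread. */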
static void virtballoon_changed(struct virtio_device *vdev)
{
	struct virtio_balloon *vb = vdev->priv;

	wake_up(&vb->config_change);
}

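/* Read the host's target from config space; a positive return means we should
 * inflate by that many pages, a negative one means deflate. */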
static inline s64 towards_target(struct virtio_balloon *vb)
{
	u32 v;
	vb->vdev->config->get(vb->vdev,
			      offsetof(struct virtio_balloon_config, num_pages),
			      &v, sizeof(v));
	return (s64)v - vb->num_pages;
}

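/* Report the current balloon size back to the host via the 'actual' field. */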
static void update_balloon_size(struct virtio_balloon *vb)
{
	__le32 actual = cpu_to_le32(vb->num_pages);

	vb->vdev->config->set(vb->vdev,
			      offsetof(struct virtio_balloon_config, actual),
			      &actual, sizeof(actual));
}

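/* The balloon kthread: sleep until the target changes, a stats request
 * arrives, we are asked to freeze, or we should stop; then move toward the
 * target one pfn-array chunk at a time and report the new size. */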
static int balloon(void *_vballoon)
{
	struct virtio_balloon *vb = _vballoon;

	set_freezable();
	while (!kthread_should_stop()) {
		s64 diff;

		try_to_freeze();
		wait_event_interruptible(vb->config_change,
					 (diff = towards_target(vb)) != 0
					 || vb->need_stats_update
					 || kthread_should_stop()
					 || freezing(current));
		if (vb->need_stats_update)
			stats_handle_request(vb);
		if (diff > 0)
			fill_balloon(vb, diff);
		else if (diff < 0)
			leak_balloon(vb, -diff);
		update_balloon_size(vb);
	}
	return 0;
}

static int virtballoon_probe(struct virtio_device *vdev)
{
	struct virtio_balloon *vb;
	struct virtqueue *vqs[3];
	vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request };
	const char *names[] = { "inflate", "deflate", "stats" };
	int err, nvqs;

	vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
	if (!vb) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&vb->pages);
	vb->num_pages = 0;
	init_waitqueue_head(&vb->config_change);
	vb->vdev = vdev;
	vb->need_stats_update = 0;

	/* We expect two virtqueues: inflate and deflate,
	 * and optionally stat. */
	nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2;
	err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
	if (err)
		goto out_free_vb;

	vb->inflate_vq = vqs[0];
	vb->deflate_vq = vqs[1];
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
		struct scatterlist sg;
		vb->stats_vq = vqs[2];

		/*
		 * Prime this virtqueue with one buffer so the hypervisor can
		 * use it to signal us later.
		 */
		sg_init_one(&sg, vb->stats, sizeof vb->stats);
		if (vb->stats_vq->vq_ops->add_buf(vb->stats_vq,
						  &sg, 1, 0, vb) < 0)
			BUG();
		vb->stats_vq->vq_ops->kick(vb->stats_vq);
	}

	vb->thread = kthread_run(balloon, vb, "vballoon");
	if (IS_ERR(vb->thread)) {
		err = PTR_ERR(vb->thread);
		goto out_del_vqs;
	}

	vb->tell_host_first
		= virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);

	return 0;

out_del_vqs:
	vdev->config->del_vqs(vdev);
out_free_vb:
	kfree(vb);
out:
	return err;
}

static void __devexit virtballoon_remove(struct virtio_device *vdev)
{
	struct virtio_balloon *vb = vdev->priv;

	kthread_stop(vb->thread);

	/* There might be pages left in the balloon: free them. */
	while (vb->num_pages)
		leak_balloon(vb, vb->num_pages);

	/* Now we reset the device so we can clean up the queues. */
	vdev->config->reset(vdev);

	vdev->config->del_vqs(vdev);
	kfree(vb);
}

static unsigned int features[] = {
	VIRTIO_BALLOON_F_MUST_TELL_HOST,
	VIRTIO_BALLOON_F_STATS_VQ,
};

static struct virtio_driver virtio_balloon_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name =	KBUILD_MODNAME,
	.driver.owner =	THIS_MODULE,
	.id_table =	id_table,
	.probe =	virtballoon_probe,
	.remove =	__devexit_p(virtballoon_remove),
	.config_changed = virtballoon_changed,
};

static int __init init(void)
{
	return register_virtio_driver(&virtio_balloon_driver);
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_balloon_driver);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio balloon driver");
MODULE_LICENSE("GPL");