// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen frontend/backend page directory based shared buffer
 * helper module.
 *
 * Copyright (C) 2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>

#include <asm/xen/hypervisor.h>
#include <xen/balloon.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/interface/io/ring.h>

#include <xen/xen-front-pgdir-shbuf.h>

/*
 * This structure describes the layout of a shared page that holds
 * grant references to the pages of the shared buffer. This layout
 * is common to many Xen para-virtualized protocols at
 * include/xen/interface/io/.
 */
struct xen_page_directory {
	grant_ref_t gref_dir_next_page;
#define XEN_GREF_LIST_END	0
	grant_ref_t gref[]; /* Variable length */
};

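/*
 * The page directory is a singly-linked list of pages: each page holds
 * as many grant references as fit after the header and chains to the
 * next page via gref_dir_next_page (XEN_GREF_LIST_END terminates the
 * list):
 *
 *   +--------------------+    +--------------------+
 *   | gref_dir_next_page | -> | XEN_GREF_LIST_END  |
 *   | gref[0]            |    | gref[0]            |
 *   | ...                |    | ...                |
 *   | gref[N - 1]        |    | gref[M - 1]        |
 *   +--------------------+    +--------------------+
 */
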
/*
 * Shared buffer ops, implemented differently depending on the
 * allocation mode, e.g. whether the buffer is allocated by the
 * corresponding backend or by the frontend itself. Some of the
 * operations are optional (see backend_ops and local_ops at the
 * end of this file).
 */
struct xen_front_pgdir_shbuf_ops {
	/*
	 * Calculate number of grefs required to handle this buffer,
	 * e.g. if grefs are required for page directory only or the buffer
	 * pages as well.
	 */
	void (*calc_num_grefs)(struct xen_front_pgdir_shbuf *buf);

	/* Fill the page directory according to the para-virtualized protocol. */
	void (*fill_page_dir)(struct xen_front_pgdir_shbuf *buf);

	/* Claim grant references for the pages of the buffer. */
	int (*grant_refs_for_buffer)(struct xen_front_pgdir_shbuf *buf,
				     grant_ref_t *priv_gref_head, int gref_idx);

	/* Map grant references of the buffer. */
	int (*map)(struct xen_front_pgdir_shbuf *buf);

	/* Unmap grant references of the buffer. */
	int (*unmap)(struct xen_front_pgdir_shbuf *buf);
};

/*
 * Get the granted reference to the very first page of the
 * page directory. Usually this is passed to the backend,
 * so it can find/fill the grant references to the buffer's
 * pages.
 *
 * \param buf shared buffer whose page directory is of interest.
 * \return granted reference to the very first page of the
 * page directory.
 */
grant_ref_t
xen_front_pgdir_shbuf_get_dir_start(struct xen_front_pgdir_shbuf *buf)
{
	if (!buf->grefs)
		return INVALID_GRANT_REF;

	return buf->grefs[0];
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_get_dir_start);

/*
 * Map granted references of the shared buffer.
 *
 * Depending on the shared buffer mode of allocation
 * (be_alloc flag) this can either do nothing (for buffers
 * shared by the frontend itself) or map the provided granted
 * references onto the backing storage (buf->pages).
 *
 * \param buf shared buffer whose grants are to be mapped.
 * \return zero on success or a negative number on failure.
 */
int xen_front_pgdir_shbuf_map(struct xen_front_pgdir_shbuf *buf)
{
	if (buf->ops && buf->ops->map)
		return buf->ops->map(buf);

	/* No need to map own grant references. */
	return 0;
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_map);

/*
 * Unmap granted references of the shared buffer.
 *
 * Depending on the shared buffer mode of allocation
 * (be_alloc flag) this can either do nothing (for buffers
 * shared by the frontend itself) or unmap the provided granted
 * references.
 *
 * \param buf shared buffer whose grants are to be unmapped.
 * \return zero on success or a negative number on failure.
 */
int xen_front_pgdir_shbuf_unmap(struct xen_front_pgdir_shbuf *buf)
{
	if (buf->ops && buf->ops->unmap)
		return buf->ops->unmap(buf);

	/* No need to unmap own grant references. */
	return 0;
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_unmap);

/*
 * Free all the resources of the shared buffer.
 *
 * \param buf shared buffer whose resources are to be freed.
 */
void xen_front_pgdir_shbuf_free(struct xen_front_pgdir_shbuf *buf)
{
	if (buf->grefs) {
		int i;

		for (i = 0; i < buf->num_grefs; i++)
			if (buf->grefs[i] != INVALID_GRANT_REF)
				gnttab_end_foreign_access(buf->grefs[i], NULL);
	}
	kfree(buf->grefs);
	kfree(buf->directory);
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_free);

/*
 * Number of grefs a page can hold, taking the
 * struct xen_page_directory header into account.
 */
#define XEN_NUM_GREFS_PER_PAGE ((PAGE_SIZE - \
				 offsetof(struct xen_page_directory, \
					  gref)) / sizeof(grant_ref_t))
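
/*
 * For illustration (assuming 4 KiB pages and a 4-byte grant_ref_t):
 * the header takes one grant_ref_t slot, so a single directory page
 * holds (4096 - 4) / 4 = 1023 grant references, and e.g. a 1024-page
 * buffer needs DIV_ROUND_UP(1024, 1023) = 2 directory pages
 * (see get_num_pages_dir() below).
 */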

/*
 * Calculate the number of pages the page directory itself consumes.
 *
 * \param buf shared buffer.
 */
static int get_num_pages_dir(struct xen_front_pgdir_shbuf *buf)
{
	return DIV_ROUND_UP(buf->num_pages, XEN_NUM_GREFS_PER_PAGE);
}

/*
 * Calculate the number of grant references needed to share the buffer
 * and its pages when the backend allocates the buffer.
 *
 * \param buf shared buffer.
 */
static void backend_calc_num_grefs(struct xen_front_pgdir_shbuf *buf)
{
	/* Grefs are needed only for the pages of the page directory itself. */
	buf->num_grefs = get_num_pages_dir(buf);
}

/*
 * Calculate the number of grant references needed to share the buffer
 * and its pages when the frontend allocates the buffer.
 *
 * \param buf shared buffer.
 */
static void guest_calc_num_grefs(struct xen_front_pgdir_shbuf *buf)
{
	/*
	 * Number of pages the page directory consumes itself
	 * plus grefs for the buffer pages.
	 */
	buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages;
}

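/*
 * With the frontend-allocated layout above, buf->grefs is filled as
 * [dir page 0 .. dir page N-1 | buffer page 0 .. buffer page M-1]:
 * e.g. (assuming 1023 grefs per 4 KiB directory page) a 1024-page
 * buffer uses 2 + 1024 = 1026 grefs in total.
 */

/* Kernel virtual address of the page backing a shared buffer page. */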
#define xen_page_to_vaddr(page) \
	((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page)))

/*
 * Unmap the buffer previously mapped with grant references
 * provided by the backend.
 *
 * \param buf shared buffer.
 * \return zero on success or a negative number on failure.
 */
static int backend_unmap(struct xen_front_pgdir_shbuf *buf)
{
	struct gnttab_unmap_grant_ref *unmap_ops;
	int i, ret;

	if (!buf->pages || !buf->backend_map_handles || !buf->grefs)
		return 0;

	unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops),
			    GFP_KERNEL);
	if (!unmap_ops)
		return -ENOMEM;

	for (i = 0; i < buf->num_pages; i++) {
		phys_addr_t addr;

		addr = xen_page_to_vaddr(buf->pages[i]);
		gnttab_set_unmap_op(&unmap_ops[i], addr, GNTMAP_host_map,
				    buf->backend_map_handles[i]);
	}

	ret = gnttab_unmap_refs(unmap_ops, NULL, buf->pages,
				buf->num_pages);

	for (i = 0; i < buf->num_pages; i++) {
		if (unlikely(unmap_ops[i].status != GNTST_okay))
			dev_err(&buf->xb_dev->dev,
				"Failed to unmap page %d: %d\n",
				i, unmap_ops[i].status);
	}

	if (ret)
		dev_err(&buf->xb_dev->dev,
			"Failed to unmap grant references, ret %d\n", ret);

	kfree(unmap_ops);
	kfree(buf->backend_map_handles);
	buf->backend_map_handles = NULL;
	return ret;
}

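/*
 * Note: backend_unmap() is safe to call even if the buffer was never
 * mapped or has already been unmapped: the NULL check on
 * buf->backend_map_handles above makes it return 0 in that case.
 */
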
/*
 * Map the buffer with grant references provided by the backend.
 *
 * \param buf shared buffer.
 * \return zero on success or a negative number on failure.
 */
static int backend_map(struct xen_front_pgdir_shbuf *buf)
{
	struct gnttab_map_grant_ref *map_ops = NULL;
	unsigned char *ptr;
	int ret, cur_gref, cur_dir_page, cur_page, grefs_left;

	map_ops = kcalloc(buf->num_pages, sizeof(*map_ops), GFP_KERNEL);
	if (!map_ops)
		return -ENOMEM;

	buf->backend_map_handles = kcalloc(buf->num_pages,
					   sizeof(*buf->backend_map_handles),
					   GFP_KERNEL);
	if (!buf->backend_map_handles) {
		kfree(map_ops);
		return -ENOMEM;
	}

	/*
	 * Read the page directory to get grefs from the backend: for an
	 * externally allocated buffer we only allocate buf->grefs for the
	 * page directory, so buf->num_grefs holds the number of pages in
	 * the page directory itself.
	 */
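	/*
	 * For illustration (assuming 1023 grefs per 4 KiB directory page):
	 * for a 2000-page buffer, directory page 0 supplies the grefs for
	 * buffer pages 0..1022 and directory page 1 those for pages
	 * 1023..1999.
	 */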
	ptr = buf->directory;
	grefs_left = buf->num_pages;
	cur_page = 0;
	for (cur_dir_page = 0; cur_dir_page < buf->num_grefs; cur_dir_page++) {
		struct xen_page_directory *page_dir =
			(struct xen_page_directory *)ptr;
		int to_copy = XEN_NUM_GREFS_PER_PAGE;

		if (to_copy > grefs_left)
			to_copy = grefs_left;

		for (cur_gref = 0; cur_gref < to_copy; cur_gref++) {
			phys_addr_t addr;

			addr = xen_page_to_vaddr(buf->pages[cur_page]);
			gnttab_set_map_op(&map_ops[cur_page], addr,
					  GNTMAP_host_map,
					  page_dir->gref[cur_gref],
					  buf->xb_dev->otherend_id);
			cur_page++;
		}

		grefs_left -= to_copy;
		ptr += PAGE_SIZE;
	}

	ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages);

	/* Save the handles even on error, so we can unmap them later. */
	for (cur_page = 0; cur_page < buf->num_pages; cur_page++) {
		if (likely(map_ops[cur_page].status == GNTST_okay)) {
			buf->backend_map_handles[cur_page] =
				map_ops[cur_page].handle;
		} else {
			buf->backend_map_handles[cur_page] =
				INVALID_GRANT_HANDLE;
			if (!ret)
				ret = -ENXIO;
			dev_err(&buf->xb_dev->dev,
				"Failed to map page %d: %d\n",
				cur_page, map_ops[cur_page].status);
		}
	}

	if (ret) {
		dev_err(&buf->xb_dev->dev,
			"Failed to map grant references, ret %d\n", ret);
		backend_unmap(buf);
	}

	kfree(map_ops);
	return ret;
}

/*
 * Fill the page directory with grant references to the pages of the
 * page directory itself.
 *
 * The grant references to the buffer pages are provided by the
 * backend in this case.
 *
 * \param buf shared buffer.
 */
static void backend_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
{
	struct xen_page_directory *page_dir;
	unsigned char *ptr;
	int i, num_pages_dir;

	ptr = buf->directory;
	num_pages_dir = get_num_pages_dir(buf);

	/* Fill only the grefs for the page directory itself. */
	for (i = 0; i < num_pages_dir - 1; i++) {
		page_dir = (struct xen_page_directory *)ptr;

		page_dir->gref_dir_next_page = buf->grefs[i + 1];
		ptr += PAGE_SIZE;
	}
	/* The last page must say there are no more pages. */
	page_dir = (struct xen_page_directory *)ptr;
	page_dir->gref_dir_next_page = XEN_GREF_LIST_END;
}

/*
 * Fill the page directory with grant references to the pages of the
 * page directory and the buffer we share with the backend.
 *
 * \param buf shared buffer.
 */
static void guest_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
{
	unsigned char *ptr;
	int cur_gref, grefs_left, to_copy, i, num_pages_dir;

	ptr = buf->directory;
	num_pages_dir = get_num_pages_dir(buf);

	/*
	 * While copying, skip the grefs at the start: they are for the
	 * pages granted for the page directory itself.
	 */
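	/*
	 * For illustration (assuming 1023 grefs per 4 KiB directory page):
	 * for a 1030-page buffer, directory page 0 carries the grefs for
	 * buffer pages 0..1022 and links to directory page 1
	 * (buf->grefs[1]), which carries the remaining 7 grefs and
	 * terminates the list with XEN_GREF_LIST_END.
	 */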
	cur_gref = num_pages_dir;
	grefs_left = buf->num_pages;
	for (i = 0; i < num_pages_dir; i++) {
		struct xen_page_directory *page_dir =
			(struct xen_page_directory *)ptr;

		if (grefs_left <= XEN_NUM_GREFS_PER_PAGE) {
			to_copy = grefs_left;
			page_dir->gref_dir_next_page = XEN_GREF_LIST_END;
		} else {
			to_copy = XEN_NUM_GREFS_PER_PAGE;
			page_dir->gref_dir_next_page = buf->grefs[i + 1];
		}
		memcpy(&page_dir->gref, &buf->grefs[cur_gref],
		       to_copy * sizeof(grant_ref_t));
		ptr += PAGE_SIZE;
		grefs_left -= to_copy;
		cur_gref += to_copy;
	}
}

/*
 * Grant references to the frontend's buffer pages.
 *
 * These will be shared with the backend, so it can
 * access the buffer's data.
 *
 * \param buf shared buffer.
 * \param priv_gref_head private list of pre-allocated grant references
 * to claim from.
 * \param gref_idx index in buf->grefs at which to store the first gref.
 * \return zero on success or a negative number on failure.
 */
static int guest_grant_refs_for_buffer(struct xen_front_pgdir_shbuf *buf,
				       grant_ref_t *priv_gref_head,
				       int gref_idx)
{
	int i, cur_ref, otherend_id;

	otherend_id = buf->xb_dev->otherend_id;
	for (i = 0; i < buf->num_pages; i++) {
		cur_ref = gnttab_claim_grant_reference(priv_gref_head);
		if (cur_ref < 0)
			return cur_ref;

		gnttab_grant_foreign_access_ref(cur_ref, otherend_id,
						xen_page_to_gfn(buf->pages[i]),
						0);
		buf->grefs[gref_idx++] = cur_ref;
	}
	return 0;
}

/*
 * Grant all the references needed to share the buffer.
 *
 * Grant references to the page directory pages and, if
 * needed, also to the pages of the shared buffer data.
 *
 * \param buf shared buffer.
 * \return zero on success or a negative number on failure.
 */
static int grant_references(struct xen_front_pgdir_shbuf *buf)
{
	grant_ref_t priv_gref_head;
	int ret, i, j, cur_ref;
	int otherend_id, num_pages_dir;

	ret = gnttab_alloc_grant_references(buf->num_grefs, &priv_gref_head);
	if (ret < 0) {
		dev_err(&buf->xb_dev->dev,
			"Cannot allocate grant references\n");
		return ret;
	}

	otherend_id = buf->xb_dev->otherend_id;
	j = 0;
	num_pages_dir = get_num_pages_dir(buf);
	for (i = 0; i < num_pages_dir; i++) {
		unsigned long frame;

		cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
		if (cur_ref < 0) {
			ret = cur_ref;
			goto out;
		}

		frame = xen_page_to_gfn(virt_to_page(buf->directory +
						     PAGE_SIZE * i));
		gnttab_grant_foreign_access_ref(cur_ref, otherend_id, frame, 0);
		buf->grefs[j++] = cur_ref;
	}

	ret = 0;
	if (buf->ops->grant_refs_for_buffer)
		ret = buf->ops->grant_refs_for_buffer(buf, &priv_gref_head, j);

out:
	/* Return any references left unclaimed back to the free pool. */
	gnttab_free_grant_references(priv_gref_head);
	return ret;
}

/*
 * Allocate all the structures required to manage the shared buffer.
 *
 * \param buf shared buffer.
 * \return zero on success or a negative number on failure.
 */
static int alloc_storage(struct xen_front_pgdir_shbuf *buf)
{
	buf->grefs = kcalloc(buf->num_grefs, sizeof(*buf->grefs), GFP_KERNEL);
	if (!buf->grefs)
		return -ENOMEM;

	buf->directory = kcalloc(get_num_pages_dir(buf), PAGE_SIZE, GFP_KERNEL);
	if (!buf->directory)
		return -ENOMEM;

	return 0;
}

/*
 * For backend-allocated buffers we don't need grant_refs_for_buffer,
 * as those grant references are allocated on the backend side.
 */
static const struct xen_front_pgdir_shbuf_ops backend_ops = {
	.calc_num_grefs = backend_calc_num_grefs,
	.fill_page_dir = backend_fill_page_dir,
	.map = backend_map,
	.unmap = backend_unmap,
};

/*
 * For locally granted references we do not need to map/unmap
 * the references.
 */
static const struct xen_front_pgdir_shbuf_ops local_ops = {
	.calc_num_grefs = guest_calc_num_grefs,
	.fill_page_dir = guest_fill_page_dir,
	.grant_refs_for_buffer = guest_grant_refs_for_buffer,
};

/*
 * Allocate a new instance of a shared buffer.
 *
 * \param cfg configuration to be used while allocating a new shared buffer.
 * \return zero on success or a negative number on failure.
 */
int xen_front_pgdir_shbuf_alloc(struct xen_front_pgdir_shbuf_cfg *cfg)
{
	struct xen_front_pgdir_shbuf *buf = cfg->pgdir;
	int ret;

	if (cfg->be_alloc)
		buf->ops = &backend_ops;
	else
		buf->ops = &local_ops;
	buf->xb_dev = cfg->xb_dev;
	buf->num_pages = cfg->num_pages;
	buf->pages = cfg->pages;

	buf->ops->calc_num_grefs(buf);

	ret = alloc_storage(buf);
	if (ret)
		goto fail;

	ret = grant_references(buf);
	if (ret)
		goto fail;

	buf->ops->fill_page_dir(buf);

	return 0;

fail:
	xen_front_pgdir_shbuf_free(buf);
	return ret;
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_alloc);
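
/*
 * Typical usage (a minimal sketch: error handling and the
 * protocol-specific XenBus negotiation are omitted, and the page
 * array is assumed to be allocated already, e.g. via
 * gnttab_alloc_pages()):
 *
 *	struct xen_front_pgdir_shbuf shbuf = {};
 *	struct xen_front_pgdir_shbuf_cfg cfg = {
 *		.xb_dev = xb_dev,
 *		.num_pages = num_pages,
 *		.pages = pages,
 *		.pgdir = &shbuf,
 *		.be_alloc = be_alloc,
 *	};
 *
 *	ret = xen_front_pgdir_shbuf_alloc(&cfg);
 *
 * The frontend then passes xen_front_pgdir_shbuf_get_dir_start(&shbuf)
 * to the backend in a protocol-specific request and, for be_alloc
 * buffers, calls xen_front_pgdir_shbuf_map(&shbuf) once the backend
 * has filled the directory. Teardown is xen_front_pgdir_shbuf_unmap()
 * followed by xen_front_pgdir_shbuf_free().
 */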

MODULE_DESCRIPTION("Xen frontend/backend page directory based "
		   "shared buffer handling");
MODULE_AUTHOR("Oleksandr Andrushchenko");
MODULE_LICENSE("GPL");