/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright 2018 Joyent, Inc.
 */

/*
 * Machine frame segment driver.  This segment driver allows dom0 processes to
 * map pages of other domains or Xen (e.g. during save/restore).  ioctl()s on
 * the privcmd driver provide the MFN values backing each mapping, and we map
 * them into the process's address space at this time.  Demand-faulting is not
 * supported by this driver due to the requirements upon some of the ioctl()s.
 */


#include <sys/types.h>
#include <sys/systm.h>
#include <sys/vmsystm.h>
#include <sys/mman.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/debug.h>
#include <sys/lgrp.h>
#include <sys/hypervisor.h>

#include <vm/page.h>
#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg.h>

#include <vm/hat_pte.h>
#include <vm/hat_i86.h>
#include <vm/seg_mf.h>

#include <sys/fs/snode.h>

#define	VTOCVP(vp)	(VTOS(vp)->s_commonvp)

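/*
 * Each page of the segment is described by a segmf_map_t entry, which is
 * either empty, a raw foreign-MFN mapping, or a grant-reference mapping.
 */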
typedef struct segmf_mfn_s {
	mfn_t		m_mfn;
} segmf_mfn_t;

/* g_flags */
#define	SEGMF_GFLAGS_WR		0x1
#define	SEGMF_GFLAGS_MAPPED	0x2
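/*
 * A grant-reference mapping: g_ptep is the machine address of the PTE that
 * backs the user mapping, g_gref is the grant reference supplied by the
 * foreign domain, and g_handle is the grant handle the hypervisor returns
 * once the reference is mapped (at which point SEGMF_GFLAGS_MAPPED is set).
 */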
typedef struct segmf_gref_s {
	uint64_t	g_ptep;
	grant_ref_t	g_gref;
	uint32_t	g_flags;
	grant_handle_t	g_handle;
} segmf_gref_t;

typedef union segmf_mu_u {
	segmf_mfn_t	m;
	segmf_gref_t	g;
} segmf_mu_t;

typedef enum {
	SEGMF_MAP_EMPTY = 0,
	SEGMF_MAP_MFN,
	SEGMF_MAP_GREF
} segmf_map_type_t;

typedef struct segmf_map_s {
	segmf_map_type_t	t_type;
	segmf_mu_t		u;
} segmf_map_t;

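/*
 * Per-segment private data: the device vnode backing the mapping, the
 * current and maximum protections, a count of soft-locked pages, the
 * foreign domain this segment maps, and a per-page array of map entries.
 */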
struct segmf_data {
	kmutex_t	lock;
	struct vnode	*vp;
	uchar_t		prot;
	uchar_t		maxprot;
	size_t		softlockcnt;
	domid_t		domid;
	segmf_map_t	*map;
};

static struct seg_ops segmf_ops;

static int segmf_fault_gref_range(struct seg *seg, caddr_t addr, size_t len);

static struct segmf_data *
segmf_data_zalloc(struct seg *seg)
{
	struct segmf_data *data = kmem_zalloc(sizeof (*data), KM_SLEEP);

	mutex_init(&data->lock, "segmf.lock", MUTEX_DEFAULT, NULL);
	seg->s_ops = &segmf_ops;
	seg->s_data = data;
	return (data);
}

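/*
 * Create the segment: build an empty per-page map, look up the backing
 * device vnode and register the mapping with it via VOP_ADDMAP().  The
 * actual MFN or grant-reference mappings are added later by ioctl().
 */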
int
segmf_create(struct seg **segpp, void *args)
{
	struct seg *seg = *segpp;
	struct segmf_crargs *a = args;
	struct segmf_data *data;
	struct as *as = seg->s_as;
	pgcnt_t i, npages = seg_pages(seg);
	int error;

	hat_map(as->a_hat, seg->s_base, seg->s_size, HAT_MAP);

	data = segmf_data_zalloc(seg);
	data->vp = specfind(a->dev, VCHR);
	data->prot = a->prot;
	data->maxprot = a->maxprot;

	data->map = kmem_alloc(npages * sizeof (segmf_map_t), KM_SLEEP);
	for (i = 0; i < npages; i++) {
		data->map[i].t_type = SEGMF_MAP_EMPTY;
	}

	error = VOP_ADDMAP(VTOCVP(data->vp), 0, as, seg->s_base, seg->s_size,
	    data->prot, data->maxprot, MAP_SHARED, CRED(), NULL);

	if (error != 0)
		hat_unload(as->a_hat,
		    seg->s_base, seg->s_size, HAT_UNLOAD_UNMAP);
	return (error);
}

/*
 * Duplicate a seg and return new segment in newseg.
 */
static int
segmf_dup(struct seg *seg, struct seg *newseg)
{
	struct segmf_data *data = seg->s_data;
	struct segmf_data *ndata;
	pgcnt_t npages = seg_pages(newseg);
	size_t sz;

	ndata = segmf_data_zalloc(newseg);

	VN_HOLD(data->vp);
	ndata->vp = data->vp;
	ndata->prot = data->prot;
	ndata->maxprot = data->maxprot;
	ndata->domid = data->domid;

	sz = npages * sizeof (segmf_map_t);
	ndata->map = kmem_alloc(sz, KM_SLEEP);
	bcopy(data->map, ndata->map, sz);

	return (VOP_ADDMAP(VTOCVP(ndata->vp), 0, newseg->s_as,
	    newseg->s_base, newseg->s_size, ndata->prot, ndata->maxprot,
	    MAP_SHARED, CRED(), NULL));
}

/*
 * We only support unmapping the whole segment, and we automatically unlock
 * what we previously soft-locked.
 */
static int
segmf_unmap(struct seg *seg, caddr_t addr, size_t len)
{
	struct segmf_data *data = seg->s_data;
	offset_t off;

	if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
	    (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET))
		panic("segmf_unmap");

	if (addr != seg->s_base || len != seg->s_size)
		return (ENOTSUP);

	hat_unload(seg->s_as->a_hat, addr, len,
	    HAT_UNLOAD_UNMAP | HAT_UNLOAD_UNLOCK);

	off = (offset_t)seg_page(seg, addr);

	ASSERT(data->vp != NULL);

	(void) VOP_DELMAP(VTOCVP(data->vp), off, seg->s_as, addr, len,
	    data->prot, data->maxprot, MAP_SHARED, CRED(), NULL);

	seg_free(seg);
	return (0);
}

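/*
 * Free the per-page map, drop the hold on the backing vnode and free the
 * segment-private data.
 */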
static void
segmf_free(struct seg *seg)
{
	struct segmf_data *data = seg->s_data;
	pgcnt_t npages = seg_pages(seg);

	kmem_free(data->map, npages * sizeof (segmf_map_t));
	VN_RELE(data->vp);
	mutex_destroy(&data->lock);
	kmem_free(data, sizeof (*data));
}

static int segmf_faultpage_debug = 0;
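/*
 * Establish a single foreign-MFN mapping at 'addr'.  For F_SOFTLOCK faults
 * the mapping is locked and the segment's softlockcnt is bumped; on failure
 * the placeholder mapping is torn down and the count restored.
 */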
/*ARGSUSED*/
static int
segmf_faultpage(struct hat *hat, struct seg *seg, caddr_t addr,
    enum fault_type type, uint_t prot)
{
	struct segmf_data *data = seg->s_data;
	uint_t hat_flags = HAT_LOAD_NOCONSIST;
	mfn_t mfn;
	x86pte_t pte;
	segmf_map_t *map;
	uint_t idx;


	idx = seg_page(seg, addr);
	map = &data->map[idx];
	ASSERT(map->t_type == SEGMF_MAP_MFN);

	mfn = map->u.m.m_mfn;

	if (type == F_SOFTLOCK) {
		mutex_enter(&freemem_lock);
		data->softlockcnt++;
		mutex_exit(&freemem_lock);
		hat_flags |= HAT_LOAD_LOCK;
	} else
		hat_flags |= HAT_LOAD;

	if (segmf_faultpage_debug > 0) {
		uprintf("segmf_faultpage: addr %p domid %x mfn %lx prot %x\n",
		    (void *)addr, data->domid, mfn, prot);
		segmf_faultpage_debug--;
	}

	/*
	 * Ask the HAT to load a throwaway mapping to page zero, then
	 * overwrite it with our foreign domain mapping.  It gets removed
	 * later via hat_unload().
	 */
	hat_devload(hat, addr, MMU_PAGESIZE, (pfn_t)0,
	    PROT_READ | HAT_UNORDERED_OK, hat_flags);

	pte = mmu_ptob((x86pte_t)mfn) | PT_VALID | PT_USER | PT_FOREIGN;
	if (prot & PROT_WRITE)
		pte |= PT_WRITABLE;

	if (HYPERVISOR_update_va_mapping_otherdomain((uintptr_t)addr, pte,
	    UVMF_INVLPG | UVMF_ALL, data->domid) != 0) {
		hat_flags = HAT_UNLOAD_UNMAP;

		if (type == F_SOFTLOCK) {
			hat_flags |= HAT_UNLOAD_UNLOCK;
			mutex_enter(&freemem_lock);
			data->softlockcnt--;
			mutex_exit(&freemem_lock);
		}

		hat_unload(hat, addr, MMU_PAGESIZE, hat_flags);
		return (FC_MAKE_ERR(EFAULT));
	}

	return (0);
}

static int
seg_rw_to_prot(enum seg_rw rw)
{
	switch (rw) {
	case S_READ:
		return (PROT_READ);
	case S_WRITE:
		return (PROT_WRITE);
	case S_EXEC:
		return (PROT_EXEC);
	case S_OTHER:
	default:
		break;
	}
	return (PROT_READ | PROT_WRITE | PROT_EXEC);
}

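/*
 * Unlock a soft-locked range and, once the segment's softlockcnt drops to
 * zero, wake up any thread waiting to unmap this address space.
 */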
static void
segmf_softunlock(struct hat *hat, struct seg *seg, caddr_t addr, size_t len)
{
	struct segmf_data *data = seg->s_data;

	hat_unlock(hat, addr, len);

	mutex_enter(&freemem_lock);
	ASSERT(data->softlockcnt >= btopr(len));
	data->softlockcnt -= btopr(len);
	mutex_exit(&freemem_lock);

	if (data->softlockcnt == 0) {
		struct as *as = seg->s_as;

		if (AS_ISUNMAPWAIT(as)) {
			mutex_enter(&as->a_contents);
			if (AS_ISUNMAPWAIT(as)) {
				AS_CLRUNMAPWAIT(as);
				cv_broadcast(&as->a_cv);
			}
			mutex_exit(&as->a_contents);
		}
	}
}

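/*
 * Fault in each page of the range.  If any page fails while soft-locking,
 * back out by unlocking the pages that were already locked.
 */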
static int
segmf_fault_range(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
    enum fault_type type, enum seg_rw rw)
{
	struct segmf_data *data = seg->s_data;
	int error = 0;
	caddr_t a;

	if ((data->prot & seg_rw_to_prot(rw)) == 0)
		return (FC_PROT);

	/* loop over the address range handling each fault */

	for (a = addr; a < addr + len; a += PAGESIZE) {
		error = segmf_faultpage(hat, seg, a, type, data->prot);
		if (error != 0)
			break;
	}

	if (error != 0 && type == F_SOFTLOCK) {
		size_t done = (size_t)(a - addr);

		/*
		 * Undo what's been done so far.
		 */
		if (done > 0)
			segmf_softunlock(hat, seg, addr, done);
	}

	return (error);
}

/*
 * We never demand-fault for seg_mf.
 */
/*ARGSUSED*/
static int
segmf_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
    enum fault_type type, enum seg_rw rw)
{
	return (FC_MAKE_ERR(EFAULT));
}

/*ARGSUSED*/
static int
segmf_faulta(struct seg *seg, caddr_t addr)
{
	return (0);
}

/*ARGSUSED*/
static int
segmf_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
	return (EINVAL);
}

/*ARGSUSED*/
static int
segmf_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
	return (EINVAL);
}

/*ARGSUSED*/
static int
segmf_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	return (-1);
}

/*ARGSUSED*/
static int
segmf_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
{
	return (0);
}

/*
 * XXPV	Hmm.  Should we say that mf mappings are "in core"?
 */

/*ARGSUSED*/
static size_t
segmf_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
{
	size_t v;

	for (v = 0, len = (len + PAGEOFFSET) & PAGEMASK; len;
	    len -= PAGESIZE, v += PAGESIZE)
		*vec++ = 1;
	return (v);
}

/*ARGSUSED*/
static int
segmf_lockop(struct seg *seg, caddr_t addr,
    size_t len, int attr, int op, ulong_t *lockmap, size_t pos)
{
	return (0);
}

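/*
 * Every page in the range shares the segment-wide protections, so simply
 * replicate data->prot across the protection vector.
 */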
static int
segmf_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
{
	struct segmf_data *data = seg->s_data;
	pgcnt_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;

	if (pgno != 0) {
		do {
			protv[--pgno] = data->prot;
		} while (pgno != 0);
	}
	return (0);
}

static u_offset_t
segmf_getoffset(struct seg *seg, caddr_t addr)
{
	return (addr - seg->s_base);
}

/*ARGSUSED*/
static int
segmf_gettype(struct seg *seg, caddr_t addr)
{
	return (MAP_SHARED);
}

/*ARGSUSED1*/
static int
segmf_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
{
	struct segmf_data *data = seg->s_data;

	*vpp = VTOCVP(data->vp);
	return (0);
}

/*ARGSUSED*/
static int
segmf_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
{
	return (0);
}

/*ARGSUSED*/
static void
segmf_dump(struct seg *seg)
{}

/*ARGSUSED*/
static int
segmf_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}

/*ARGSUSED*/
static int
segmf_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
{
	return (ENOTSUP);
}

static int
segmf_getmemid(struct seg *seg, caddr_t addr, memid_t *memid)
{
	struct segmf_data *data = seg->s_data;

	memid->val[0] = (uintptr_t)VTOCVP(data->vp);
	memid->val[1] = (uintptr_t)seg_page(seg, addr);
	return (0);
}

/*ARGSUSED*/
static lgrp_mem_policy_info_t *
segmf_getpolicy(struct seg *seg, caddr_t addr)
{
	return (NULL);
}

/*ARGSUSED*/
static int
segmf_capable(struct seg *seg, segcapability_t capability)
{
	return (0);
}

/*
 * Add a set of contiguous foreign MFNs to the segment, soft-locking them.
 * The pre-faulting is necessary due to live migration; in particular we must
 * return an error in response to IOCTL_PRIVCMD_MMAPBATCH rather than faulting
 * later on a bad MFN.  Whilst this isn't necessary for the other MMAP
 * ioctl()s, we lock them too, as they should be transitory.
 */
int
segmf_add_mfns(struct seg *seg, caddr_t addr, mfn_t mfn,
    pgcnt_t pgcnt, domid_t domid)
{
	struct segmf_data *data = seg->s_data;
	pgcnt_t base;
	faultcode_t fc;
	pgcnt_t i;
	int error = 0;

	if (seg->s_ops != &segmf_ops)
		return (EINVAL);

	/*
	 * Don't mess with dom0.
	 *
	 * Only allow the domid to be set once for the segment.
	 * After that, attempts to add mappings to this segment for
	 * other domains explicitly fail.
	 */

	if (domid == 0 || domid == DOMID_SELF)
		return (EACCES);

	mutex_enter(&data->lock);

	if (data->domid == 0)
		data->domid = domid;

	if (data->domid != domid) {
		error = EINVAL;
		goto out;
	}

	base = seg_page(seg, addr);

	for (i = 0; i < pgcnt; i++) {
		data->map[base + i].t_type = SEGMF_MAP_MFN;
		data->map[base + i].u.m.m_mfn = mfn++;
	}

	fc = segmf_fault_range(seg->s_as->a_hat, seg, addr,
	    pgcnt * MMU_PAGESIZE, F_SOFTLOCK, S_OTHER);

	if (fc != 0) {
		error = fc_decode(fc);
		for (i = 0; i < pgcnt; i++) {
			data->map[base + i].t_type = SEGMF_MAP_EMPTY;
		}
	}

out:
	mutex_exit(&data->lock);
	return (error);
}

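/*
 * Add 'cnt' grant references from domain 'domid' at 'addr'.  The grefs are
 * recorded in the per-page map and then mapped immediately, since (as with
 * MFN mappings) this driver never demand-faults.
 */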
int
segmf_add_grefs(struct seg *seg, caddr_t addr, uint_t flags,
    grant_ref_t *grefs, uint_t cnt, domid_t domid)
{
	struct segmf_data *data;
	segmf_map_t *map;
	faultcode_t fc;
	uint_t idx;
	uint_t i;
	int e;

	if (seg->s_ops != &segmf_ops)
		return (EINVAL);

	/*
	 * Don't mess with dom0.
	 *
	 * Only allow the domid to be set once for the segment.
	 * After that, attempts to add mappings to this segment for
	 * other domains explicitly fail.
	 */

	if (domid == 0 || domid == DOMID_SELF)
		return (EACCES);

	data = seg->s_data;
	idx = seg_page(seg, addr);
	map = &data->map[idx];
	e = 0;

	mutex_enter(&data->lock);

	if (data->domid == 0)
		data->domid = domid;

	if (data->domid != domid) {
		e = EINVAL;
		goto out;
	}

	/* store away the grefs passed in then fault in the pages */
	for (i = 0; i < cnt; i++) {
		map[i].t_type = SEGMF_MAP_GREF;
		map[i].u.g.g_gref = grefs[i];
		map[i].u.g.g_handle = 0;
		map[i].u.g.g_flags = 0;
		if (flags & SEGMF_GREF_WR) {
			map[i].u.g.g_flags |= SEGMF_GFLAGS_WR;
		}
	}
	fc = segmf_fault_gref_range(seg, addr, cnt);
	if (fc != 0) {
		e = fc_decode(fc);
		for (i = 0; i < cnt; i++) {
			map[i].t_type = SEGMF_MAP_EMPTY;
		}
	}

out:
	mutex_exit(&data->lock);
	return (e);
}

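/*
 * Unmap previously mapped grant references.  'cnt' is limited to
 * SEGMF_MAX_GREFS because the unmap operations are built on the stack.
 */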
int
segmf_release_grefs(struct seg *seg, caddr_t addr, uint_t cnt)
{
	gnttab_unmap_grant_ref_t mapop[SEGMF_MAX_GREFS];
	struct segmf_data *data;
	segmf_map_t *map;
	uint_t idx;
	long e;
	int i;
	int n;


	if (cnt > SEGMF_MAX_GREFS) {
		return (-1);
	}

	idx = seg_page(seg, addr);
	data = seg->s_data;
	map = &data->map[idx];

	bzero(mapop, sizeof (gnttab_unmap_grant_ref_t) * cnt);

	/*
	 * For each entry which isn't empty and is currently mapped, set it
	 * up for an unmap; every entry is then marked empty.
	 */
	n = 0;
	for (i = 0; i < cnt; i++) {
		ASSERT(map[i].t_type != SEGMF_MAP_MFN);
		if ((map[i].t_type == SEGMF_MAP_GREF) &&
		    (map[i].u.g.g_flags & SEGMF_GFLAGS_MAPPED)) {
			mapop[n].handle = map[i].u.g.g_handle;
			mapop[n].host_addr = map[i].u.g.g_ptep;
			mapop[n].dev_bus_addr = 0;
			n++;
		}
		map[i].t_type = SEGMF_MAP_EMPTY;
	}

	/* if there's nothing to unmap, just return */
	if (n == 0) {
		return (0);
	}

	e = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &mapop, n);
	if (e != 0) {
		return (-1);
	}

	return (0);
}


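/*
 * Record the machine address of the PTE that backs 'addr'.  The grant-table
 * map and unmap operations need this since we use GNTMAP_contains_pte.
 */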
void
segmf_add_gref_pte(struct seg *seg, caddr_t addr, uint64_t pte_ma)
{
	struct segmf_data *data;
	uint_t idx;

	idx = seg_page(seg, addr);
	data = seg->s_data;

	data->map[idx].u.g.g_ptep = pte_ma;
}


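/*
 * Map 'cnt' grant references starting at 'addr' into the address space.
 * As with segmf_release_grefs(), the operation array lives on the stack,
 * so cnt may not exceed SEGMF_MAX_GREFS.
 */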
static int
segmf_fault_gref_range(struct seg *seg, caddr_t addr, size_t cnt)
{
	gnttab_map_grant_ref_t mapop[SEGMF_MAX_GREFS];
	struct segmf_data *data;
	segmf_map_t *map;
	uint_t idx;
	int e;
	int i;


	if (cnt > SEGMF_MAX_GREFS) {
		return (-1);
	}

	data = seg->s_data;
	idx = seg_page(seg, addr);
	map = &data->map[idx];

	bzero(mapop, sizeof (gnttab_map_grant_ref_t) * cnt);

	ASSERT(map->t_type == SEGMF_MAP_GREF);

	/*
	 * Map each page passed in into the user app's AS.  We do this by
	 * passing the MA of the actual pte of the mapping to the hypervisor.
	 */
	for (i = 0; i < cnt; i++) {
		mapop[i].host_addr = map[i].u.g.g_ptep;
		mapop[i].dom = data->domid;
		mapop[i].ref = map[i].u.g.g_gref;
		mapop[i].flags = GNTMAP_host_map | GNTMAP_application_map |
		    GNTMAP_contains_pte;
		if (!(map[i].u.g.g_flags & SEGMF_GFLAGS_WR)) {
			mapop[i].flags |= GNTMAP_readonly;
		}
	}
	e = xen_map_gref(GNTTABOP_map_grant_ref, mapop, cnt, B_TRUE);
	if ((e != 0) || (mapop[0].status != GNTST_okay)) {
		return (FC_MAKE_ERR(EFAULT));
	}

	/* save handle for segmf_release_grefs() and mark it as mapped */
	for (i = 0; i < cnt; i++) {
		ASSERT(mapop[i].status == GNTST_okay);
		map[i].u.g.g_handle = mapop[i].handle;
		map[i].u.g.g_flags |= SEGMF_GFLAGS_MAPPED;
	}

	return (0);
}

static struct seg_ops segmf_ops = {
	segmf_dup,
	segmf_unmap,
	segmf_free,
	segmf_fault,
	segmf_faulta,
	segmf_setprot,
	segmf_checkprot,
	(int (*)())segmf_kluster,
	(size_t (*)(struct seg *))NULL,	/* swapout */
	segmf_sync,
	segmf_incore,
	segmf_lockop,
	segmf_getprot,
	segmf_getoffset,
	segmf_gettype,
	segmf_getvp,
	segmf_advise,
	segmf_dump,
	segmf_pagelock,
	segmf_setpagesize,
	segmf_getmemid,
	segmf_getpolicy,
	segmf_capable,
	seg_inherit_notsup
};