xref: /freebsd/lib/libkvm/kvm_amd64.c (revision 8baaf913be7543273fb411228b26206440c21978)
/*-
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)kvm_hp300.c	8.1 (Berkeley) 6/4/93";
#endif
#endif /* LIBC_SCCS and not lint */

/*
 * AMD64 machine dependent routines for kvm.  Hopefully, the forthcoming
 * vm code will one day obsolete this module.
 */

#include <sys/param.h>
#include <sys/endian.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <vm/vm.h>
#include <kvm.h>

#include <limits.h>

#include "kvm_private.h"
#include "kvm_amd64.h"

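/*
 * Illustrative usage sketch only (not part of this module): a libkvm
 * consumer never calls the routines below directly; they are reached
 * through the kvm_arch dispatch table at the end of this file.  The
 * symbol name "hz" and the file paths are assumptions for the example,
 * which is therefore left disabled.
 */
#if 0
#include <fcntl.h>
#include <nlist.h>
#include <stdio.h>

static void
example_read_vmcore(void)
{
	char errbuf[_POSIX2_LINE_MAX];
	struct nlist nl[] = { { .n_name = "hz" }, { .n_name = NULL } };
	kvm_t *kd;
	int hz;

	/* Opening a crash dump runs _amd64_probe() and _amd64_initvtop(). */
	kd = kvm_openfiles("/boot/kernel/kernel", "/var/crash/vmcore.0",
	    NULL, O_RDONLY, errbuf);
	if (kd == NULL)
		return;
	/* kvm_read() translates the symbol's address via _amd64_kvatop(). */
	if (kvm_nlist(kd, nl) == 0 &&
	    kvm_read(kd, nl[0].n_value, &hz, sizeof(hz)) == sizeof(hz))
		printf("hz = %d\n", hz);
	kvm_close(kd);
}
#endif
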
struct vmstate {
	size_t		phnum;	/* number of ELF program headers */
	GElf_Phdr	*phdr;	/* program headers of the core file */
	amd64_pml4e_t	*PML4;	/* cached copy of the kernel's PML4 page */
};

/*
 * Translate a physical memory address to a file-offset in the crash-dump.
 */
static size_t
_kvm_pa2off(kvm_t *kd, uint64_t pa, off_t *ofs)
{
	struct vmstate *vm = kd->vmst;
	GElf_Phdr *p;
	size_t n;

	if (kd->rawdump) {
		*ofs = pa;
		return (AMD64_PAGE_SIZE - (pa & AMD64_PAGE_MASK));
	}

	p = vm->phdr;
	n = vm->phnum;
	while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz))
		p++, n--;
	if (n == 0)
		return (0);
	*ofs = (pa - p->p_paddr) + p->p_offset;
	return (AMD64_PAGE_SIZE - (pa & AMD64_PAGE_MASK));
}

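/*
 * Worked example with made-up numbers: for a program header with
 * p_paddr = 0x1000000, p_memsz = 0x200000 and p_offset = 0x3000, a lookup
 * of pa = 0x1001234 falls inside that segment and yields
 * *ofs = (0x1001234 - 0x1000000) + 0x3000 = 0x4234, returning
 * 0x1000 - 0x234 = 0xdcc, the bytes remaining in that 4 KB page.
 */
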
static void
_amd64_freevtop(kvm_t *kd)
{
	struct vmstate *vm = kd->vmst;

	if (vm->PML4)
		free(vm->PML4);
	free(vm->phdr);
	free(vm);
	kd->vmst = NULL;
}

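/*
 * Claim full (non-minidump) ELF vmcores written by 64-bit x86 kernels;
 * minidumps are assumed to be picked up by the separate amd64 minidump
 * back-end instead.
 */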
static int
_amd64_probe(kvm_t *kd)
{

	return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_X86_64) &&
	    !_kvm_is_minidump(kd));
}

static int
_amd64_initvtop(kvm_t *kd)
{
	struct kvm_nlist nl[2];
	amd64_physaddr_t pa;
	kvaddr_t kernbase;
	amd64_pml4e_t *PML4;

	kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*kd->vmst));
	if (kd->vmst == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst->PML4 = 0;

	if (kd->rawdump == 0) {
		if (_kvm_read_core_phdrs(kd, &kd->vmst->phnum,
		    &kd->vmst->phdr) == -1)
			return (-1);
	}

	nl[0].n_name = "kernbase";
	nl[1].n_name = 0;

	if (kvm_nlist2(kd, nl) != 0) {
		_kvm_err(kd, kd->program, "bad namelist - no kernbase");
		return (-1);
	}
	kernbase = nl[0].n_value;

	nl[0].n_name = "KPML4phys";
	nl[1].n_name = 0;

	if (kvm_nlist2(kd, nl) != 0) {
		_kvm_err(kd, kd->program, "bad namelist - no KPML4phys");
		return (-1);
	}
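	/*
	 * vm->PML4 is still NULL here, so the kvm_read2() below goes through
	 * _amd64_vatop()'s bootstrap path and treats its argument as a
	 * physical address; subtracting kernbase is assumed to give the
	 * symbol's physical address under the direct KERNBASE mapping.
	 */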
	if (kvm_read2(kd, (nl[0].n_value - kernbase), &pa, sizeof(pa)) !=
	    sizeof(pa)) {
		_kvm_err(kd, kd->program, "cannot read KPML4phys");
		return (-1);
	}
	pa = le64toh(pa);
	PML4 = _kvm_malloc(kd, AMD64_PAGE_SIZE);
	if (PML4 == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate PML4");
		return (-1);
	}
	if (kvm_read2(kd, pa, PML4, AMD64_PAGE_SIZE) != AMD64_PAGE_SIZE) {
		_kvm_err(kd, kd->program, "cannot read KPML4phys");
		free(PML4);
		return (-1);
	}
	kd->vmst->PML4 = PML4;
	return (0);
}

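/*
 * Page-table walk overview (constants assumed from kvm_amd64.h, matching
 * standard amd64 paging): a 4 KB-mapped virtual address is decomposed into
 * four 9-bit table indices plus a 12-bit byte offset:
 *
 *	pml4eindex = (va >> 39) & 511;
 *	pdpeindex  = (va >> 30) & 511;
 *	pdeindex   = (va >> 21) & 511;
 *	pteindex   = (va >> 12) & 511;
 *	offset     = va & 0xfff;
 *
 * A PG_PS bit in the PDPE (1 GB page) or in the PDE (2 MB page) terminates
 * the walk early; both cases are handled below.
 */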
static int
_amd64_vatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	struct vmstate *vm;
	amd64_physaddr_t offset;
	amd64_physaddr_t pdpe_pa;
	amd64_physaddr_t pde_pa;
	amd64_physaddr_t pte_pa;
	amd64_pml4e_t pml4e;
	amd64_pdpe_t pdpe;
	amd64_pde_t pde;
	amd64_pte_t pte;
	kvaddr_t pml4eindex;
	kvaddr_t pdpeindex;
	kvaddr_t pdeindex;
	kvaddr_t pteindex;
	amd64_physaddr_t a;
	off_t ofs;
	size_t s;

	vm = kd->vmst;
	offset = va & AMD64_PAGE_MASK;

	/*
	 * If we are initializing (kernel page table descriptor pointer
	 * not yet set) then return pa == va to avoid infinite recursion.
	 */
	if (vm->PML4 == NULL) {
		s = _kvm_pa2off(kd, va, pa);
		if (s == 0) {
			_kvm_err(kd, kd->program,
			    "_amd64_vatop: bootstrap data not in dump");
			goto invalid;
		} else
			return (AMD64_PAGE_SIZE - offset);
	}

	pml4eindex = (va >> AMD64_PML4SHIFT) & (AMD64_NPML4EPG - 1);
	pml4e = le64toh(vm->PML4[pml4eindex]);
	if ((pml4e & AMD64_PG_V) == 0) {
		_kvm_err(kd, kd->program, "_amd64_vatop: pml4e not valid");
		goto invalid;
	}

	pdpeindex = (va >> AMD64_PDPSHIFT) & (AMD64_NPDPEPG - 1);
	pdpe_pa = (pml4e & AMD64_PG_FRAME) + (pdpeindex * sizeof(amd64_pdpe_t));

	s = _kvm_pa2off(kd, pdpe_pa, &ofs);
	if (s < sizeof(pdpe)) {
		_kvm_err(kd, kd->program, "_amd64_vatop: pdpe_pa not found");
		goto invalid;
	}
	if (pread(kd->pmfd, &pdpe, sizeof(pdpe), ofs) != sizeof(pdpe)) {
		_kvm_syserr(kd, kd->program, "_amd64_vatop: read pdpe");
		goto invalid;
	}
	pdpe = le64toh(pdpe);
	if ((pdpe & AMD64_PG_V) == 0) {
		_kvm_err(kd, kd->program, "_amd64_vatop: pdpe not valid");
		goto invalid;
	}

	if (pdpe & AMD64_PG_PS) {
		/*
		 * No next-level page table; pdpe describes one 1GB page.
		 */
		a = (pdpe & AMD64_PG_1GB_FRAME) + (va & AMD64_PDPMASK);
		s = _kvm_pa2off(kd, a, pa);
		if (s == 0) {
			_kvm_err(kd, kd->program,
			    "_amd64_vatop: 1GB page address not in dump");
			goto invalid;
		} else
			return (AMD64_NBPDP - (va & AMD64_PDPMASK));
	}

	pdeindex = (va >> AMD64_PDRSHIFT) & (AMD64_NPDEPG - 1);
	pde_pa = (pdpe & AMD64_PG_FRAME) + (pdeindex * sizeof(amd64_pde_t));

	s = _kvm_pa2off(kd, pde_pa, &ofs);
	if (s < sizeof(pde)) {
		_kvm_syserr(kd, kd->program, "_amd64_vatop: pde_pa not found");
		goto invalid;
	}
	if (pread(kd->pmfd, &pde, sizeof(pde), ofs) != sizeof(pde)) {
		_kvm_syserr(kd, kd->program, "_amd64_vatop: read pde");
		goto invalid;
	}
	pde = le64toh(pde);
	if ((pde & AMD64_PG_V) == 0) {
		_kvm_err(kd, kd->program, "_amd64_vatop: pde not valid");
		goto invalid;
	}

	if (pde & AMD64_PG_PS) {
		/*
		 * No final-level page table; pde describes one 2MB page.
		 */
		a = (pde & AMD64_PG_PS_FRAME) + (va & AMD64_PDRMASK);
		s = _kvm_pa2off(kd, a, pa);
		if (s == 0) {
			_kvm_err(kd, kd->program,
			    "_amd64_vatop: 2MB page address not in dump");
			goto invalid;
		} else
			return (AMD64_NBPDR - (va & AMD64_PDRMASK));
	}

	pteindex = (va >> AMD64_PAGE_SHIFT) & (AMD64_NPTEPG - 1);
	pte_pa = (pde & AMD64_PG_FRAME) + (pteindex * sizeof(amd64_pte_t));

	s = _kvm_pa2off(kd, pte_pa, &ofs);
	if (s < sizeof(pte)) {
		_kvm_err(kd, kd->program, "_amd64_vatop: pte_pa not found");
		goto invalid;
	}
	if (pread(kd->pmfd, &pte, sizeof(pte), ofs) != sizeof(pte)) {
		_kvm_syserr(kd, kd->program, "_amd64_vatop: read pte");
		goto invalid;
	}
	pte = le64toh(pte);
	if ((pte & AMD64_PG_V) == 0) {
		_kvm_err(kd, kd->program, "_amd64_vatop: pte not valid");
		goto invalid;
	}

	a = (pte & AMD64_PG_FRAME) + offset;
	s = _kvm_pa2off(kd, a, pa);
	if (s == 0) {
		_kvm_err(kd, kd->program, "_amd64_vatop: address not in dump");
		goto invalid;
	} else
		return (AMD64_PAGE_SIZE - offset);

invalid:
	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
	return (0);
}

static int
_amd64_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "kvm_kvatop called in live kernel!");
		return (0);
	}
	return (_amd64_vatop(kd, va, pa));
}

int
_amd64_native(kvm_t *kd __unused)
{

#ifdef __amd64__
	return (1);
#else
	return (0);
#endif
}

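/*
 * Back-end registration: KVM_ARCH() (from kvm_private.h) is assumed to add
 * this descriptor to a linker set that kvm_open*() walks, invoking ka_probe
 * on each entry until one of them claims the kernel image.
 */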
static struct kvm_arch kvm_amd64 = {
	.ka_probe = _amd64_probe,
	.ka_initvtop = _amd64_initvtop,
	.ka_freevtop = _amd64_freevtop,
	.ka_kvatop = _amd64_kvatop,
	.ka_native = _amd64_native,
};

KVM_ARCH(kvm_amd64);