# xref: /linux/scripts/gdb/linux/mm.py (revision 2b7a25df823dc7d8f56f8ce7c2d2dac391cea9c2)
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2023 MediaTek Inc.
#
# Authors:
#  Kuan-Ying Lee <Kuan-Ying.Lee@mediatek.com>
#

import math

import gdb

from linux import utils, constants
12
def DIV_ROUND_UP(n, d):
    """Integer ceiling division, mirroring the kernel's DIV_ROUND_UP() macro."""
    return (n + d - 1) // d
15
16def test_bit(nr, addr):
17    if addr.dereference() & (0x1 << nr):
18        return True
19    else:
20        return False
21
class page_ops():
    """Pick the architecture-specific page-translation helper.

    The chosen helper object is exposed as ``self.ops``; only the
    SPARSEMEM_VMEMMAP memory model on arm64 and x86-64 is supported.
    """
    ops = None

    def __init__(self):
        if not constants.LX_CONFIG_SPARSEMEM_VMEMMAP:
            raise gdb.GdbError('Only support CONFIG_SPARSEMEM_VMEMMAP now')
        if constants.LX_CONFIG_ARM64 and utils.is_target_arch('aarch64'):
            self.ops = aarch64_page_ops()
            return
        if utils.is_target_arch('x86_64') or utils.is_target_arch('x86-64'):
            self.ops = x86_page_ops()
            return
        raise gdb.GdbError('Only support aarch64 and x86_64 now')
33
class x86_page_ops():
    """Page/PFN/phys/virt translation helpers for x86-64.

    All KASLR-randomized layout bases (page_offset_base, vmemmap_base,
    vmalloc_base, phys_base) are read from the live kernel via gdb, so an
    instance is only valid for the currently attached target.
    """

    def __init__(self):
        self.struct_page_size = utils.get_page_type().sizeof
        self.PAGE_SHIFT = constants.LX_CONFIG_PAGE_SHIFT
        self.PAGE_SIZE = 1 << self.PAGE_SHIFT
        self.PAGE_MASK = (~(self.PAGE_SIZE - 1)) & ((1 << 64) - 1)

        # KASLR-randomized bases, read from the running kernel.
        self.PAGE_OFFSET = int(gdb.parse_and_eval("page_offset_base"))
        self.VMEMMAP_START = int(gdb.parse_and_eval("vmemmap_base"))
        self.PHYS_BASE = int(gdb.parse_and_eval("phys_base"))
        self.START_KERNEL_map = 0xffffffff80000000

        self.KERNEL_START = gdb.parse_and_eval("_text")
        self.KERNEL_END = gdb.parse_and_eval("_end")

        self.VMALLOC_START = int(gdb.parse_and_eval("vmalloc_base"))
        if self.VMALLOC_START == 0xffffc90000000000:
            # 4-level paging default base: 32 TB vmalloc area.
            self.VMALLOC_END = self.VMALLOC_START + (32 * 1024 * 1024 * 1024 * 1024) - 1
        else:
            # 5-level paging (base 0xffa0000000000000) or a KASLR-shifted
            # base: 12800 TB vmalloc area.  The original code had an elif
            # for the 5-level base whose body was identical to this else.
            self.VMALLOC_END = self.VMALLOC_START + (12800 * 1024 * 1024 * 1024 * 1024) - 1

        self.MAX_PHYSMEM_BITS = 46
        self.SECTION_SIZE_BITS = 27
        self.MAX_ORDER = 10

        # SPARSEMEM section geometry.
        self.SECTIONS_SHIFT = self.MAX_PHYSMEM_BITS - self.SECTION_SIZE_BITS
        self.NR_MEM_SECTIONS = 1 << self.SECTIONS_SHIFT
        self.PFN_SECTION_SHIFT = self.SECTION_SIZE_BITS - self.PAGE_SHIFT
        self.PAGES_PER_SECTION = 1 << self.PFN_SECTION_SHIFT
        self.PAGE_SECTION_MASK = (~(self.PAGES_PER_SECTION - 1)) & ((1 << 64) - 1)

        if constants.LX_CONFIG_SPARSEMEM_EXTREME:
            self.SECTIONS_PER_ROOT = self.PAGE_SIZE // gdb.lookup_type("struct mem_section").sizeof
        else:
            self.SECTIONS_PER_ROOT = 1

        self.NR_SECTION_ROOTS = DIV_ROUND_UP(self.NR_MEM_SECTIONS, self.SECTIONS_PER_ROOT)
        self.SECTION_ROOT_MASK = self.SECTIONS_PER_ROOT - 1

        try:
            self.SECTION_HAS_MEM_MAP = 1 << int(gdb.parse_and_eval('SECTION_HAS_MEM_MAP_BIT'))
            self.SECTION_IS_EARLY = 1 << int(gdb.parse_and_eval('SECTION_IS_EARLY_BIT'))
        except gdb.error:
            # Enum values missing from debug info; fall back to the
            # well-known bit positions from include/linux/mmzone.h.
            # (Was a bare ``except:``, which also hid unrelated errors.)
            self.SECTION_HAS_MEM_MAP = 1 << 0
            self.SECTION_IS_EARLY = 1 << 3

        self.SUBSECTION_SHIFT = 21
        self.PAGES_PER_SUBSECTION = 1 << (self.SUBSECTION_SHIFT - self.PAGE_SHIFT)

        if constants.LX_CONFIG_NUMA and constants.LX_CONFIG_NODES_SHIFT:
            self.NODE_SHIFT = constants.LX_CONFIG_NODES_SHIFT
        else:
            self.NODE_SHIFT = 0

        self.MAX_NUMNODES = 1 << self.NODE_SHIFT

        # Base of the virtual struct page array.
        self.vmemmap = gdb.Value(self.VMEMMAP_START).cast(utils.get_page_type().pointer())

    def kasan_reset_tag(self, addr):
        """x86 has no KASAN pointer tags; return the address unchanged."""
        return addr

    def SECTION_NR_TO_ROOT(self, sec):
        """Return the mem_section root index holding section *sec*."""
        return sec // self.SECTIONS_PER_ROOT

    def __nr_to_section(self, nr):
        """Look up the struct mem_section for section number *nr*."""
        root = self.SECTION_NR_TO_ROOT(nr)
        mem_section = gdb.parse_and_eval("mem_section")
        return mem_section[root][nr & self.SECTION_ROOT_MASK]

    def pfn_to_section_nr(self, pfn):
        """Return the section number containing *pfn*."""
        return pfn >> self.PFN_SECTION_SHIFT

    def section_nr_to_pfn(self, sec):
        """Return the first PFN of section *sec*."""
        return sec << self.PFN_SECTION_SHIFT

    def __pfn_to_section(self, pfn):
        return self.__nr_to_section(self.pfn_to_section_nr(pfn))

    def pfn_to_section(self, pfn):
        """Public wrapper: struct mem_section covering *pfn*."""
        return self.__pfn_to_section(pfn)

    def subsection_map_index(self, pfn):
        """Index of *pfn*'s subsection within its section's subsection_map."""
        return (pfn & ~(self.PAGE_SECTION_MASK)) // self.PAGES_PER_SUBSECTION

    def pfn_section_valid(self, ms, pfn):
        """Check the section's subsection_map bit for *pfn*."""
        if constants.LX_CONFIG_SPARSEMEM_VMEMMAP:
            idx = self.subsection_map_index(pfn)
            return test_bit(idx, ms['usage']['subsection_map'])
        else:
            return True

    def valid_section(self, mem_section):
        """True if the section has a memory map (SECTION_HAS_MEM_MAP set)."""
        if mem_section is not None and (mem_section['section_mem_map'] & self.SECTION_HAS_MEM_MAP):
            return True
        return False

    def early_section(self, mem_section):
        """True if the section was registered early (SECTION_IS_EARLY set)."""
        if mem_section is not None and (mem_section['section_mem_map'] & self.SECTION_IS_EARLY):
            return True
        return False

    def pfn_valid(self, pfn):
        """Mirror of the kernel's pfn_valid() for SPARSEMEM."""
        # Reject PFNs that don't round-trip through a physical address.
        if self.PHYS_PFN(self.PFN_PHYS(pfn)) != pfn:
            return False
        if self.pfn_to_section_nr(pfn) >= self.NR_MEM_SECTIONS:
            return False
        ms = self.__pfn_to_section(pfn)

        if not self.valid_section(ms):
            return False
        # Early sections are always fully populated; otherwise consult
        # the per-subsection presence map.
        return self.early_section(ms) or self.pfn_section_valid(ms, pfn)

    def PFN_PHYS(self, pfn):
        """PFN -> physical address."""
        return pfn << self.PAGE_SHIFT

    def PHYS_PFN(self, phys):
        """Physical address -> PFN."""
        return phys >> self.PAGE_SHIFT

    def __phys_to_virt(self, pa):
        return pa + self.PAGE_OFFSET

    def __virt_to_phys(self, va):
        # Kernel-text addresses live above START_KERNEL_map; everything
        # else is assumed to be in the linear (direct) mapping.
        if va >= self.START_KERNEL_map:
            return va - self.START_KERNEL_map + self.PHYS_BASE
        else:
            return va - self.PAGE_OFFSET

    def virt_to_phys(self, va):
        """Kernel virtual address -> physical address."""
        return self.__virt_to_phys(va)

    def virt_to_page(self, va):
        """Kernel virtual address -> struct page pointer."""
        return self.pfn_to_page(self.virt_to_pfn(va))

    def __pa(self, va):
        return self.__virt_to_phys(va)

    def __va(self, pa):
        return self.__phys_to_virt(pa)

    def pfn_to_kaddr(self, pfn):
        """PFN -> linear-map kernel virtual address."""
        return self.__va(pfn << self.PAGE_SHIFT)

    def virt_to_pfn(self, va):
        """Kernel virtual address -> PFN."""
        return self.PHYS_PFN(self.__virt_to_phys(va))

    def sym_to_pfn(self, x):
        """Kernel symbol address -> PFN."""
        return self.PHYS_PFN(self.__virt_to_phys(x))

    def page_to_pfn(self, page):
        """struct page pointer -> PFN (index into vmemmap)."""
        return int(page.cast(utils.get_page_type().pointer()) - self.vmemmap)

    def pfn_to_page(self, pfn):
        """PFN -> struct page pointer (vmemmap indexing)."""
        return self.vmemmap + pfn

    def page_to_phys(self, page):
        """struct page pointer -> physical address."""
        return self.PFN_PHYS(self.page_to_pfn(page))

    def page_to_virt(self, page):
        """struct page pointer -> linear-map kernel virtual address."""
        return self.__va(self.page_to_phys(page))

    def page_address(self, page):
        """Alias of page_to_virt(), matching the kernel helper's name."""
        return self.page_to_virt(page)

    def folio_address(self, folio):
        """Virtual address of a folio's first page."""
        return self.page_address(folio['page'].address)
202
class aarch64_page_ops():
    """Page/PFN/phys/virt translation helpers for arm64.

    Reconstructs the arm64 virtual memory layout (linear map, modules,
    vmalloc, vmemmap, optional KASAN shadow) from kernel config constants
    and values read from the live target (TCR_EL1, kimage_voffset,
    memstart_addr).  An instance is only valid for the attached target.
    """
    def __init__(self):
        self.SUBSECTION_SHIFT = 21
        # NOTE(review): 'SEBSECTION_SIZE' looks like a typo for
        # SUBSECTION_SIZE; kept as-is since the attribute is public.
        self.SEBSECTION_SIZE = 1 << self.SUBSECTION_SHIFT
        self.MODULES_VSIZE = 2 * 1024 * 1024 * 1024

        # Section size depends on base page size (see arch/arm64 sparsemem).
        if constants.LX_CONFIG_ARM64_64K_PAGES:
            self.SECTION_SIZE_BITS = 29
        else:
            self.SECTION_SIZE_BITS = 27
        self.MAX_PHYSMEM_BITS = constants.LX_CONFIG_ARM64_VA_BITS

        self.PAGE_SHIFT = constants.LX_CONFIG_PAGE_SHIFT
        self.PAGE_SIZE = 1 << self.PAGE_SHIFT
        self.PAGE_MASK = (~(self.PAGE_SIZE - 1)) & ((1 << 64) - 1)

        self.VA_BITS = constants.LX_CONFIG_ARM64_VA_BITS
        if self.VA_BITS > 48:
            if constants.LX_CONFIG_ARM64_16K_PAGES:
                self.VA_BITS_MIN = 47
            else:
                self.VA_BITS_MIN = 48
            # With >48 VA bits the effective VA size is hardware-dependent:
            # derive it from TCR_EL1.T1SZ (bits [21:16]) on the live target.
            tcr_el1 = gdb.execute("info registers $TCR_EL1", to_string=True)
            tcr_el1 = int(tcr_el1.split()[1], 16)
            self.vabits_actual = 64 - ((tcr_el1 >> 16) & 63)
        else:
            self.VA_BITS_MIN = self.VA_BITS
            self.vabits_actual = self.VA_BITS
        # Offset between the kernel image's virtual and physical addresses.
        self.kimage_voffset = gdb.parse_and_eval('kimage_voffset') & ((1 << 64) - 1)

        self.SECTIONS_SHIFT = self.MAX_PHYSMEM_BITS - self.SECTION_SIZE_BITS

        if str(constants.LX_CONFIG_ARCH_FORCE_MAX_ORDER).isdigit():
            self.MAX_ORDER = constants.LX_CONFIG_ARCH_FORCE_MAX_ORDER
        else:
            self.MAX_ORDER = 10

        self.MAX_ORDER_NR_PAGES = 1 << (self.MAX_ORDER)
        # SPARSEMEM section geometry.
        self.PFN_SECTION_SHIFT = self.SECTION_SIZE_BITS - self.PAGE_SHIFT
        self.NR_MEM_SECTIONS = 1 << self.SECTIONS_SHIFT
        self.PAGES_PER_SECTION = 1 << self.PFN_SECTION_SHIFT
        self.PAGE_SECTION_MASK = (~(self.PAGES_PER_SECTION - 1)) & ((1 << 64) - 1)

        if constants.LX_CONFIG_SPARSEMEM_EXTREME:
            self.SECTIONS_PER_ROOT = self.PAGE_SIZE // gdb.lookup_type("struct mem_section").sizeof
        else:
            self.SECTIONS_PER_ROOT = 1

        self.NR_SECTION_ROOTS = DIV_ROUND_UP(self.NR_MEM_SECTIONS, self.SECTIONS_PER_ROOT)
        self.SECTION_ROOT_MASK = self.SECTIONS_PER_ROOT - 1
        self.SUBSECTION_SHIFT = 21
        self.SEBSECTION_SIZE = 1 << self.SUBSECTION_SHIFT
        self.PFN_SUBSECTION_SHIFT = self.SUBSECTION_SHIFT - self.PAGE_SHIFT
        self.PAGES_PER_SUBSECTION = 1 << self.PFN_SUBSECTION_SHIFT

        # Unlike the x86 helper there is no fallback here: the enum values
        # are expected to be present in the debug info.
        self.SECTION_HAS_MEM_MAP = 1 << int(gdb.parse_and_eval('SECTION_HAS_MEM_MAP_BIT'))
        self.SECTION_IS_EARLY = 1 << int(gdb.parse_and_eval('SECTION_IS_EARLY_BIT'))

        self.struct_page_size = utils.get_page_type().sizeof
        self.STRUCT_PAGE_MAX_SHIFT = (int)(math.log(self.struct_page_size, 2))

        # Virtual layout regions, mirroring arch/arm64/include/asm/memory.h.
        self.PAGE_OFFSET = self._PAGE_OFFSET(self.VA_BITS)
        self.MODULES_VADDR = self._PAGE_END(self.VA_BITS_MIN)
        self.MODULES_END = self.MODULES_VADDR + self.MODULES_VSIZE

        self.VMEMMAP_RANGE = self._PAGE_END(self.VA_BITS_MIN) - self.PAGE_OFFSET
        self.VMEMMAP_SIZE = (self.VMEMMAP_RANGE >> self.PAGE_SHIFT) * self.struct_page_size
        self.VMEMMAP_END = (-(1 * 1024 * 1024 * 1024)) & 0xffffffffffffffff
        self.VMEMMAP_START = self.VMEMMAP_END - self.VMEMMAP_SIZE

        self.VMALLOC_START = self.MODULES_END
        self.VMALLOC_END = self.VMEMMAP_START - 256 * 1024 * 1024

        # memstart_addr is the physical address mapped at PAGE_OFFSET;
        # vmemmap is biased by it so vmemmap[pfn] works for raw PFNs.
        self.memstart_addr = gdb.parse_and_eval("memstart_addr")
        self.PHYS_OFFSET = self.memstart_addr
        self.vmemmap = gdb.Value(self.VMEMMAP_START).cast(utils.get_page_type().pointer()) - (self.memstart_addr >> self.PAGE_SHIFT)

        self.KERNEL_START = gdb.parse_and_eval("_text")
        self.KERNEL_END = gdb.parse_and_eval("_end")

        if constants.LX_CONFIG_KASAN_GENERIC or constants.LX_CONFIG_KASAN_SW_TAGS:
            # KASAN shadow occupies the top of the VA space; the linear
            # map then ends below it.
            if constants.LX_CONFIG_KASAN_GENERIC:
                self.KASAN_SHADOW_SCALE_SHIFT = 3
            else:
                self.KASAN_SHADOW_SCALE_SHIFT = 4
            self.KASAN_SHADOW_OFFSET = constants.LX_CONFIG_KASAN_SHADOW_OFFSET
            self.KASAN_SHADOW_END = (1 << (64 - self.KASAN_SHADOW_SCALE_SHIFT)) + self.KASAN_SHADOW_OFFSET
            self.PAGE_END = self.KASAN_SHADOW_END - (1 << (self.vabits_actual - self.KASAN_SHADOW_SCALE_SHIFT))
        else:
            self.PAGE_END = self._PAGE_END(self.VA_BITS_MIN)

        if constants.LX_CONFIG_NUMA and constants.LX_CONFIG_NODES_SHIFT:
            self.NODE_SHIFT = constants.LX_CONFIG_NODES_SHIFT
        else:
            self.NODE_SHIFT = 0

        self.MAX_NUMNODES = 1 << self.NODE_SHIFT

    def SECTION_NR_TO_ROOT(self, sec):
        """Return the mem_section root index holding section *sec*."""
        return sec // self.SECTIONS_PER_ROOT

    def __nr_to_section(self, nr):
        """Look up the struct mem_section for section number *nr*."""
        root = self.SECTION_NR_TO_ROOT(nr)
        mem_section = gdb.parse_and_eval("mem_section")
        return mem_section[root][nr & self.SECTION_ROOT_MASK]

    def pfn_to_section_nr(self, pfn):
        """Return the section number containing *pfn*."""
        return pfn >> self.PFN_SECTION_SHIFT

    def section_nr_to_pfn(self, sec):
        """Return the first PFN of section *sec*."""
        return sec << self.PFN_SECTION_SHIFT

    def __pfn_to_section(self, pfn):
        return self.__nr_to_section(self.pfn_to_section_nr(pfn))

    def pfn_to_section(self, pfn):
        """Public wrapper: struct mem_section covering *pfn*."""
        return self.__pfn_to_section(pfn)

    def subsection_map_index(self, pfn):
        """Index of *pfn*'s subsection within its section's subsection_map."""
        return (pfn & ~(self.PAGE_SECTION_MASK)) // self.PAGES_PER_SUBSECTION

    def pfn_section_valid(self, ms, pfn):
        """Check the section's subsection_map bit for *pfn*."""
        if constants.LX_CONFIG_SPARSEMEM_VMEMMAP:
            idx = self.subsection_map_index(pfn)
            return test_bit(idx, ms['usage']['subsection_map'])
        else:
            return True

    def valid_section(self, mem_section):
        """True if the section has a memory map (SECTION_HAS_MEM_MAP set)."""
        if mem_section != None and (mem_section['section_mem_map'] & self.SECTION_HAS_MEM_MAP):
            return True
        return False

    def early_section(self, mem_section):
        """True if the section was registered early (SECTION_IS_EARLY set)."""
        if mem_section != None and (mem_section['section_mem_map'] & self.SECTION_IS_EARLY):
            return True
        return False

    def pfn_valid(self, pfn):
        """Mirror of the kernel's pfn_valid() for SPARSEMEM."""
        ms = None
        # Reject PFNs that don't round-trip through a physical address.
        if self.PHYS_PFN(self.PFN_PHYS(pfn)) != pfn:
            return False
        if self.pfn_to_section_nr(pfn) >= self.NR_MEM_SECTIONS:
            return False
        ms = self.__pfn_to_section(pfn)

        if not self.valid_section(ms):
            return False
        # Early sections are always fully populated; otherwise consult
        # the per-subsection presence map.
        return self.early_section(ms) or self.pfn_section_valid(ms, pfn)

    def _PAGE_OFFSET(self, va):
        """Start of the linear map for a *va*-bit address space."""
        return (-(1 << (va))) & 0xffffffffffffffff

    def _PAGE_END(self, va):
        """End of the linear map for a *va*-bit address space."""
        return (-(1 << (va - 1))) & 0xffffffffffffffff

    def kasan_reset_tag(self, addr):
        """Strip a KASAN pointer tag by forcing the top byte to 0xff."""
        if constants.LX_CONFIG_KASAN_SW_TAGS or constants.LX_CONFIG_KASAN_HW_TAGS:
            return int(addr) | (0xff << 56)
        else:
            return addr

    def __is_lm_address(self, addr):
        """True if *addr* falls inside the linear (direct) mapping."""
        if (addr - self.PAGE_OFFSET) < (self.PAGE_END - self.PAGE_OFFSET):
            return True
        else:
            return False
    def __lm_to_phys(self, addr):
        """Linear-map virtual address -> physical address."""
        return addr - self.PAGE_OFFSET + self.PHYS_OFFSET

    def __kimg_to_phys(self, addr):
        """Kernel-image virtual address -> physical address."""
        return addr - self.kimage_voffset

    def __virt_to_phys_nodebug(self, va):
        # Linear-map and kernel-image addresses translate differently.
        untagged_va = self.kasan_reset_tag(va)
        if self.__is_lm_address(untagged_va):
            return self.__lm_to_phys(untagged_va)
        else:
            return self.__kimg_to_phys(untagged_va)

    def __virt_to_phys(self, va):
        if constants.LX_CONFIG_DEBUG_VIRTUAL:
            # Match the kernel's DEBUG_VIRTUAL check: virt_to_phys() is
            # only defined for linear-map addresses.
            if not self.__is_lm_address(self.kasan_reset_tag(va)):
                raise gdb.GdbError("Warning: virt_to_phys used for non-linear address: 0x%lx\n" % va)
        return self.__virt_to_phys_nodebug(va)

    def virt_to_phys(self, va):
        """Kernel virtual address -> physical address."""
        return self.__virt_to_phys(va)

    def PFN_PHYS(self, pfn):
        """PFN -> physical address."""
        return pfn << self.PAGE_SHIFT

    def PHYS_PFN(self, phys):
        """Physical address -> PFN."""
        return phys >> self.PAGE_SHIFT

    def __phys_to_virt(self, pa):
        return (pa - self.PHYS_OFFSET) | self.PAGE_OFFSET

    def __phys_to_pfn(self, pa):
        return self.PHYS_PFN(pa)

    def __pfn_to_phys(self, pfn):
        return self.PFN_PHYS(pfn)

    def __pa_symbol_nodebug(self, x):
        return self.__kimg_to_phys(x)

    def __phys_addr_symbol(self, x):
        if constants.LX_CONFIG_DEBUG_VIRTUAL:
            # Symbols must lie within the kernel image [_text, _end].
            if x < self.KERNEL_START or x > self.KERNEL_END:
                raise gdb.GdbError("0x%x exceed kernel range" % x)
        return self.__pa_symbol_nodebug(x)

    def __pa_symbol(self, x):
        return self.__phys_addr_symbol(x)

    def __va(self, pa):
        return self.__phys_to_virt(pa)

    def pfn_to_kaddr(self, pfn):
        """PFN -> linear-map kernel virtual address."""
        return self.__va(pfn << self.PAGE_SHIFT)

    def virt_to_pfn(self, va):
        """Kernel virtual address -> PFN."""
        return self.__phys_to_pfn(self.__virt_to_phys(va))

    def sym_to_pfn(self, x):
        """Kernel symbol address -> PFN."""
        return self.__phys_to_pfn(self.__pa_symbol(x))

    def page_to_pfn(self, page):
        """struct page pointer -> PFN (index into the biased vmemmap)."""
        return int(page.cast(utils.get_page_type().pointer()) - self.vmemmap.cast(utils.get_page_type().pointer()))

    def page_to_phys(self, page):
        """struct page pointer -> physical address."""
        return self.__pfn_to_phys(self.page_to_pfn(page))

    def pfn_to_page(self, pfn):
        """PFN -> struct page pointer (vmemmap indexing)."""
        return (self.vmemmap + pfn).cast(utils.get_page_type().pointer())

    def page_to_virt(self, page):
        """struct page pointer -> linear-map kernel virtual address."""
        if constants.LX_CONFIG_DEBUG_VIRTUAL:
            return self.__va(self.page_to_phys(page))
        else:
            # Fast path: derive the linear address directly from the
            # page's offset within the vmemmap array.
            __idx = int((page.cast(gdb.lookup_type("unsigned long")) - self.VMEMMAP_START).cast(utils.get_ulong_type())) // self.struct_page_size
            return self.PAGE_OFFSET + (__idx * self.PAGE_SIZE)

    def virt_to_page(self, va):
        """Kernel virtual address -> struct page pointer."""
        if constants.LX_CONFIG_DEBUG_VIRTUAL:
            return self.pfn_to_page(self.virt_to_pfn(va))
        else:
            # Fast path: index the vmemmap by the address's page offset
            # within the linear map.
            __idx = int(self.kasan_reset_tag(va) - self.PAGE_OFFSET) // self.PAGE_SIZE
            addr = self.VMEMMAP_START + (__idx * self.struct_page_size)
            return gdb.Value(addr).cast(utils.get_page_type().pointer())

    def page_address(self, page):
        """Alias of page_to_virt(), matching the kernel helper's name."""
        return self.page_to_virt(page)

    def folio_address(self, folio):
        """Virtual address of a folio's first page."""
        return self.page_address(folio['page'].address)
460
class LxPFN2Page(gdb.Command):
    """PFN to struct page"""

    def __init__(self):
        super(LxPFN2Page, self).__init__("lx-pfn_to_page", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        """Translate a decimal PFN to its struct page address."""
        argv = gdb.string_to_argv(arg)
        # Fail with a clean gdb error instead of an IndexError traceback
        # when the argument is missing.
        if len(argv) != 1:
            raise gdb.GdbError("lx-pfn_to_page takes one argument: a decimal PFN")
        pfn = int(argv[0])
        page = page_ops().ops.pfn_to_page(pfn)
        gdb.write("pfn_to_page(0x%x) = 0x%x\n" % (pfn, page))

LxPFN2Page()
474
class LxPage2PFN(gdb.Command):
    """struct page to PFN"""

    def __init__(self):
        super(LxPage2PFN, self).__init__("lx-page_to_pfn", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        """Translate a hex struct page address to its PFN."""
        argv = gdb.string_to_argv(arg)
        # Fail with a clean gdb error instead of an IndexError traceback
        # when the argument is missing.
        if len(argv) != 1:
            raise gdb.GdbError("lx-page_to_pfn takes one argument: a hex struct page address")
        struct_page_addr = int(argv[0], 16)
        page = gdb.Value(struct_page_addr).cast(utils.get_page_type().pointer())
        pfn = page_ops().ops.page_to_pfn(page)
        gdb.write("page_to_pfn(0x%x) = 0x%x\n" % (page, pfn))

LxPage2PFN()
489
class LxPageAddress(gdb.Command):
    """struct page to linear mapping address"""

    def __init__(self):
        super(LxPageAddress, self).__init__("lx-page_address", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        """Translate a hex struct page address to its mapped virtual address."""
        argv = gdb.string_to_argv(arg)
        # Fail with a clean gdb error instead of an IndexError traceback
        # when the argument is missing.
        if len(argv) != 1:
            raise gdb.GdbError("lx-page_address takes one argument: a hex struct page address")
        struct_page_addr = int(argv[0], 16)
        page = gdb.Value(struct_page_addr).cast(utils.get_page_type().pointer())
        addr = page_ops().ops.page_address(page)
        gdb.write("page_address(0x%x) = 0x%x\n" % (page, addr))

LxPageAddress()
504
class LxPage2Phys(gdb.Command):
    """struct page to physical address"""

    def __init__(self):
        super(LxPage2Phys, self).__init__("lx-page_to_phys", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        """Translate a hex struct page address to its physical address."""
        argv = gdb.string_to_argv(arg)
        # Fail with a clean gdb error instead of an IndexError traceback
        # when the argument is missing.
        if len(argv) != 1:
            raise gdb.GdbError("lx-page_to_phys takes one argument: a hex struct page address")
        struct_page_addr = int(argv[0], 16)
        page = gdb.Value(struct_page_addr).cast(utils.get_page_type().pointer())
        phys_addr = page_ops().ops.page_to_phys(page)
        gdb.write("page_to_phys(0x%x) = 0x%x\n" % (page, phys_addr))

LxPage2Phys()
519
class LxVirt2Phys(gdb.Command):
    """virtual address to physical address"""

    def __init__(self):
        super(LxVirt2Phys, self).__init__("lx-virt_to_phys", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        """Translate a hex kernel virtual address to its physical address."""
        argv = gdb.string_to_argv(arg)
        # Fail with a clean gdb error instead of an IndexError traceback
        # when the argument is missing.
        if len(argv) != 1:
            raise gdb.GdbError("lx-virt_to_phys takes one argument: a hex virtual address")
        linear_addr = int(argv[0], 16)
        phys_addr = page_ops().ops.virt_to_phys(linear_addr)
        gdb.write("virt_to_phys(0x%x) = 0x%x\n" % (linear_addr, phys_addr))

LxVirt2Phys()
533
class LxVirt2Page(gdb.Command):
    """virtual address to struct page"""

    def __init__(self):
        super(LxVirt2Page, self).__init__("lx-virt_to_page", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        """Translate a hex kernel virtual address to its struct page address."""
        argv = gdb.string_to_argv(arg)
        # Fail with a clean gdb error instead of an IndexError traceback
        # when the argument is missing.
        if len(argv) != 1:
            raise gdb.GdbError("lx-virt_to_page takes one argument: a hex virtual address")
        linear_addr = int(argv[0], 16)
        page = page_ops().ops.virt_to_page(linear_addr)
        gdb.write("virt_to_page(0x%x) = 0x%x\n" % (linear_addr, page))

LxVirt2Page()
547
class LxSym2PFN(gdb.Command):
    """symbol address to PFN"""

    def __init__(self):
        super(LxSym2PFN, self).__init__("lx-sym_to_pfn", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        """Translate a hex kernel symbol address to its PFN."""
        argv = gdb.string_to_argv(arg)
        # Fail with a clean gdb error instead of an IndexError traceback
        # when the argument is missing.
        if len(argv) != 1:
            raise gdb.GdbError("lx-sym_to_pfn takes one argument: a hex symbol address")
        sym_addr = int(argv[0], 16)
        pfn = page_ops().ops.sym_to_pfn(sym_addr)
        gdb.write("sym_to_pfn(0x%x) = %d\n" % (sym_addr, pfn))

LxSym2PFN()
561
class LxPFN2Kaddr(gdb.Command):
    """PFN to kernel address"""

    def __init__(self):
        super(LxPFN2Kaddr, self).__init__("lx-pfn_to_kaddr", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        """Translate a decimal PFN to its linear-map kernel virtual address."""
        argv = gdb.string_to_argv(arg)
        # Fail with a clean gdb error instead of an IndexError traceback
        # when the argument is missing.
        if len(argv) != 1:
            raise gdb.GdbError("lx-pfn_to_kaddr takes one argument: a decimal PFN")
        pfn = int(argv[0])
        kaddr = page_ops().ops.pfn_to_kaddr(pfn)
        gdb.write("pfn_to_kaddr(%d) = 0x%x\n" % (pfn, kaddr))

LxPFN2Kaddr()
575