/*
 * Copyright (c) 2014-2018, Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  * Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *  * Neither the name of Intel Corporation nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef PT_MAPPED_SECTION_H
#define PT_MAPPED_SECTION_H

#include "intel-pt.h"
#include "pt_section.h"

#include <stdint.h>


/* A section mapped into memory. */
struct pt_mapped_section {
	/* The section that is mapped. */
	struct pt_section *section;

	/* The address space into which the section is mapped. */
	struct pt_asid asid;

	/* The virtual address at which the section is mapped. */
	uint64_t vaddr;

	/* The offset into the section.
	 *
	 * This is normally zero but when @section is split, @offset is added to
	 * the section/file offset when accessing @section.
	 */
	uint64_t offset;

	/* The size of the section.
	 *
	 * This is normally @section->size but when @section is split, this is
	 * used to determine the size of the sub-section.
	 */
	uint64_t size;
};
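
/* Illustrative note (hypothetical values, not part of the original header):
 * when a section is split, each sub-section keeps its own @offset and @size.
 * For example, a 0x2000-byte file section split into two equal halves mapped
 * back-to-back at 0x400000 would be described as:
 *
 *	first half:	vaddr = 0x400000, offset = 0x0000, size = 0x1000
 *	second half:	vaddr = 0x401000, offset = 0x1000, size = 0x1000
 *
 * Accesses to the second half add 0x1000 to the section/file offset.
 */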

/* Initialize a mapped section - uses a default asid if @asid is NULL. */
static inline void pt_msec_init(struct pt_mapped_section *msec,
				struct pt_section *section,
				const struct pt_asid *asid,
				uint64_t vaddr, uint64_t offset, uint64_t size)
{
	if (!msec)
		return;

	msec->section = section;
	msec->vaddr = vaddr;
	msec->offset = offset;
	msec->size = size;

	if (asid)
		msec->asid = *asid;
	else
		pt_asid_init(&msec->asid);
}
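
/* Example use (illustrative sketch, not part of the original header; assumes
 * @section has been created elsewhere, e.g. via pt_mk_section()):
 *
 *	struct pt_mapped_section msec;
 *	struct pt_asid asid;
 *
 *	pt_asid_init(&asid);
 *	pt_msec_init(&msec, section, &asid, 0x400000ull, 0ull,
 *		     pt_section_size(section));
 */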

/* Destroy a mapped section - does not free @msec->section. */
static inline void pt_msec_fini(struct pt_mapped_section *msec)
{
	(void) msec;

	/* Nothing to do. */
}

/* Return the virtual address of the beginning of the memory region. */
static inline uint64_t pt_msec_begin(const struct pt_mapped_section *msec)
{
	if (!msec)
		return 0ull;

	return msec->vaddr;
}

/* Return the virtual address one byte past the end of the memory region. */
static inline uint64_t pt_msec_end(const struct pt_mapped_section *msec)
{
	if (!msec)
		return 0ull;

	return msec->vaddr + msec->size;
}

/* Return the section/file offset. */
static inline uint64_t pt_msec_offset(const struct pt_mapped_section *msec)
{
	if (!msec)
		return 0ull;

	return msec->offset;
}

/* Return the section size. */
static inline uint64_t pt_msec_size(const struct pt_mapped_section *msec)
{
	if (!msec)
		return 0ull;

	return msec->size;
}

/* Return the underlying section. */
static inline struct pt_section *
pt_msec_section(const struct pt_mapped_section *msec)
{
	return msec->section;
}

/* Return an identifier for the address-space the section is mapped into. */
static inline const struct pt_asid *
pt_msec_asid(const struct pt_mapped_section *msec)
{
	if (!msec)
		return NULL;

	return &msec->asid;
}

/* Translate a section/file offset into a virtual address. */
static inline uint64_t pt_msec_map(const struct pt_mapped_section *msec,
				   uint64_t offset)
{
	return (offset - msec->offset) + msec->vaddr;
}

/* Translate a virtual address into a section/file offset. */
static inline uint64_t pt_msec_unmap(const struct pt_mapped_section *msec,
				     uint64_t vaddr)
{
	return (vaddr - msec->vaddr) + msec->offset;
}
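
/* Worked example (illustrative, hypothetical values): with vaddr = 0x401000
 * and offset = 0x1000, pt_msec_map(msec, 0x1010) yields 0x401010, and
 * pt_msec_unmap(msec, 0x401010) yields 0x1010 again.
 */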

/* Read memory from a mapped section.
 *
 * The caller must check @msec->asid.
 * The caller must ensure that @msec->section is mapped.
 *
 * Returns the number of bytes read on success.
 * Returns a negative error code otherwise.
 */
static inline int pt_msec_read(const struct pt_mapped_section *msec,
			       uint8_t *buffer, uint16_t size,
			       uint64_t vaddr)
{
	struct pt_section *section;
	uint64_t begin, end, mbegin, mend, offset;

	if (!msec)
		return -pte_internal;

	begin = vaddr;
	end = begin + size;
	if (end < begin)
		end = UINT64_MAX;

	mbegin = pt_msec_begin(msec);
	mend = pt_msec_end(msec);

	if (begin < mbegin || mend <= begin)
		return -pte_nomap;

	if (mend < end)
		end = mend;

	size = (uint16_t) (end - begin);

	section = pt_msec_section(msec);
	offset = pt_msec_unmap(msec, begin);

	return pt_section_read(section, buffer, size, offset);
}
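
/* Example use (illustrative sketch): the caller maps the underlying section
 * before reading and unmaps it afterwards, e.g. via pt_section_map() and
 * pt_section_unmap() from pt_section.h:
 *
 *	uint8_t buffer[16];
 *	int status;
 *
 *	status = pt_section_map(pt_msec_section(msec));
 *	if (status < 0)
 *		return status;
 *
 *	status = pt_msec_read(msec, buffer, sizeof(buffer), vaddr);
 *
 *	(void) pt_section_unmap(pt_msec_section(msec));
 */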

#endif /* PT_MAPPED_SECTION_H */