/*
 * Copyright (c) 2013-2019, Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  * Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *  * Neither the name of Intel Corporation nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef PT_SECTION_H
#define PT_SECTION_H

#include <stdint.h>
#include <stddef.h>

#if defined(FEATURE_THREADS)
#  include <threads.h>
#endif /* defined(FEATURE_THREADS) */

#include "intel-pt.h"

struct pt_block_cache;


/* A section of contiguous memory loaded from a file. */
struct pt_section {
	/* The name of the file. */
	char *filename;

	/* The offset into the file. */
	uint64_t offset;

	/* The (adjusted) size in bytes.  The size is truncated to match the
	 * actual file size.
	 */
	uint64_t size;

	/* A pointer to OS-specific file status for detecting changes.
	 *
	 * The status is initialized on first pt_section_map() and will be
	 * left in the section until the section is destroyed.  This field
	 * is owned by the OS-specific mmap-based section implementation.
	 */
	void *status;

	/* A pointer to implementation-specific mapping information - NULL if
	 * the section is currently not mapped.
	 *
	 * This field is set in pt_section_map() and owned by the mapping
	 * implementation.
	 */
	void *mapping;

	/* A pointer to an optional block cache.
	 *
	 * The cache is created on request and destroyed implicitly when the
	 * section is unmapped.
	 *
	 * We read this field without locking and only lock the section in order
	 * to install the block cache.
	 *
	 * We rely on guaranteed atomic operations as specified in section 8.1.1
	 * in Volume 3A of the Intel(R) Software Developer's Manual at
	 * http://www.intel.com/sdm.
	 */
	struct pt_block_cache *bcache;

	/* A pointer to the iscache attached to this section.
	 *
	 * The pointer is initialized when the iscache attaches and cleared when
	 * it detaches again.  There can be at most one iscache attached to this
	 * section at any time.
	 *
	 * In addition to attaching, the iscache will need to obtain a reference
	 * to the section, which it needs to drop again after detaching.
	 */
	struct pt_image_section_cache *iscache;

	/* A pointer to the unmap function - NULL if the section is currently
	 * not mapped.
	 *
	 * This field is set in pt_section_map() and owned by the mapping
	 * implementation.
	 */
	int (*unmap)(struct pt_section *sec);

	/* A pointer to the read function - NULL if the section is currently
	 * not mapped.
	 *
	 * This field is set in pt_section_map() and owned by the mapping
	 * implementation.
	 */
	int (*read)(const struct pt_section *sec, uint8_t *buffer,
		    uint16_t size, uint64_t offset);

	/* A pointer to the memsize function - NULL if the section is currently
	 * not mapped.
	 *
	 * This field is set in pt_section_map() and owned by the mapping
	 * implementation.
	 */
	int (*memsize)(const struct pt_section *section, uint64_t *size);

#if defined(FEATURE_THREADS)
	/* A lock protecting this section.
	 *
	 * Most operations do not require the section to be locked.  All
	 * actual locking should be handled by pt_section_* functions.
	 */
	mtx_t lock;

	/* A lock protecting the @iscache and @acount fields.
	 *
	 * We need separate locks to protect against a deadlock scenario when
	 * the iscache is mapping or unmapping this section.
	 *
	 * The attach lock must not be taken while holding the section lock; the
	 * other way round is OK.
	 */
	mtx_t alock;
#endif /* defined(FEATURE_THREADS) */

	/* The number of current users.  The last user destroys the section. */
	uint16_t ucount;

	/* The number of attaches.  This must be <= @ucount. */
	uint16_t acount;

	/* The number of current mappers.  The last one unmaps the section. */
	uint16_t mcount;
};

/* Create a section.
 *
 * The returned section describes the contents of the file @filename starting
 * at @offset for @size bytes.
 *
 * If the file is shorter than the requested @size, the section is silently
 * truncated to the size of the file.
 *
 * If @offset lies beyond the end of the file, no section is created.
 *
 * The returned section is not mapped and starts with a user count of one and
 * instruction caching enabled.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 * Returns -pte_internal if @psection is NULL.
 * Returns -pte_nomem when running out of memory.
 * Returns -pte_bad_file if @filename cannot be opened.
 * Returns -pte_invalid if @offset lies beyond the end of the file.
 * Returns -pte_invalid if @filename is too long.
 */
extern int pt_mk_section(struct pt_section **psection, const char *filename,
			 uint64_t offset, uint64_t size);

/* Lock a section.
 *
 * Locks @section.  The section must not be locked.
 *
 * Returns zero on success, a negative error code otherwise.
 * Returns -pte_bad_lock on any locking error.
 */
extern int pt_section_lock(struct pt_section *section);

/* Unlock a section.
 *
 * Unlocks @section.  The section must be locked.
 *
 * Returns zero on success, a negative error code otherwise.
 * Returns -pte_bad_lock on any locking error.
 */
extern int pt_section_unlock(struct pt_section *section);

/* Add another user.
 *
 * Increments the user count of @section.
 *
 * Returns zero on success, a negative error code otherwise.
 * Returns -pte_internal if @section is NULL.
 * Returns -pte_overflow if the user count would overflow.
 * Returns -pte_bad_lock on any locking error.
 */
extern int pt_section_get(struct pt_section *section);

/* Remove a user.
 *
 * Decrements the user count of @section.  Destroys the section if the
 * count reaches zero.
 *
 * Returns zero on success, a negative error code otherwise.
 * Returns -pte_internal if @section is NULL.
 * Returns -pte_internal if the user count is already zero.
 * Returns -pte_bad_lock on any locking error.
 */
extern int pt_section_put(struct pt_section *section);

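/* Illustrative sketch, not part of the library: a typical section lifecycle
 * using pt_mk_section(), pt_section_get(), and pt_section_put().  The file
 * name, offset, and size below are hypothetical.
 */
static inline int pt_section_example_lifecycle(void)
{
	struct pt_section *section;
	int errcode;

	/* The new section starts with a user count of one. */
	errcode = pt_mk_section(&section, "code.bin", 0x0ull, 0x1000ull);
	if (errcode < 0)
		return errcode;

	/* Add a second user. */
	errcode = pt_section_get(section);
	if (errcode < 0) {
		(void) pt_section_put(section);
		return errcode;
	}

	/* ... use @section ... */

	/* Drop the second user again. */
	(void) pt_section_put(section);

	/* The last pt_section_put() destroys @section. */
	return pt_section_put(section);
}
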
/* Attaches the image section cache user.
 *
 * Similar to pt_section_get() but sets @section->iscache to @iscache.
 *
 * Returns zero on success, a negative error code otherwise.
 * Returns -pte_internal if @section or @iscache is NULL.
 * Returns -pte_internal if a different cache is already attached.
 * Returns -pte_overflow if the attach count would overflow.
 * Returns -pte_bad_lock on any locking error.
 */
extern int pt_section_attach(struct pt_section *section,
			     struct pt_image_section_cache *iscache);

/* Detaches the image section cache user.
 *
 * Similar to pt_section_put() but clears @section->iscache.
 *
 * Returns zero on success, a negative error code otherwise.
 * Returns -pte_internal if @section or @iscache is NULL.
 * Returns -pte_internal if the attach count is already zero.
 * Returns -pte_internal if @section->iscache is not equal to @iscache.
 * Returns -pte_bad_lock on any locking error.
 */
extern int pt_section_detach(struct pt_section *section,
			     struct pt_image_section_cache *iscache);

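/* Illustrative sketch, not part of the library: the attach/detach contract as
 * seen from an image section cache.  The iscache obtains its own reference to
 * the section for the duration of the attach and drops it after detaching.
 */
static inline int pt_section_example_attach(struct pt_section *section,
					    struct pt_image_section_cache *iscache)
{
	int errcode;

	/* Obtain a reference for the attach. */
	errcode = pt_section_get(section);
	if (errcode < 0)
		return errcode;

	errcode = pt_section_attach(section, iscache);
	if (errcode < 0) {
		(void) pt_section_put(section);
		return errcode;
	}

	/* ... the iscache uses @section ... */

	errcode = pt_section_detach(section, iscache);
	if (errcode < 0)
		return errcode;

	/* Drop the reference obtained for the attach. */
	return pt_section_put(section);
}
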
/* Return the filename of @section. */
extern const char *pt_section_filename(const struct pt_section *section);

/* Return the offset of the section in bytes. */
extern uint64_t pt_section_offset(const struct pt_section *section);

/* Return the size of the section in bytes. */
extern uint64_t pt_section_size(const struct pt_section *section);

/* Return the amount of memory currently used by the section in bytes.
 *
 * We only consider the amount of memory required for mapping @section; we
 * ignore the size of the section object itself and the size of the status
 * object.
 *
 * If @section is currently not mapped, the size is zero.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 * Returns -pte_internal if @size or @section is NULL.
 */
extern int pt_section_memsize(struct pt_section *section, uint64_t *size);

/* Allocate a block cache.
 *
 * Returns zero on success, a negative error code otherwise.
 * Returns -pte_internal if @section is NULL.
 * Returns -pte_nomem if the block cache can't be allocated.
 * Returns -pte_bad_lock on any locking error.
 */
extern int pt_section_alloc_bcache(struct pt_section *section);

/* Request block caching.
 *
 * The caller must ensure that @section is mapped.
 */
static inline int pt_section_request_bcache(struct pt_section *section)
{
	if (!section)
		return -pte_internal;

	if (section->bcache)
		return 0;

	return pt_section_alloc_bcache(section);
}

/* Return @section's block cache, if available.
 *
 * The caller must ensure that @section is mapped.
 *
 * The cache is not use-counted.  It is only valid as long as the caller keeps
 * @section mapped.
 */
static inline struct pt_block_cache *
pt_section_bcache(const struct pt_section *section)
{
	if (!section)
		return NULL;

	return section->bcache;
}

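/* Illustrative sketch, not part of the library: request the block cache of a
 * section that the caller has already mapped and keeps mapped while using the
 * returned cache.
 */
static inline struct pt_block_cache *
pt_section_example_bcache(struct pt_section *section)
{
	int errcode;

	/* Creates the cache on first request; later requests are no-ops. */
	errcode = pt_section_request_bcache(section);
	if (errcode < 0)
		return NULL;

	return pt_section_bcache(section);
}
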
/* Create the OS-specific file status.
 *
 * On success, allocates a status object, provides a pointer to it in @pstatus
 * and provides the file size in @psize.
 *
 * The status object will be free()'ed when its section is.
 *
 * This function is implemented in the OS-specific section implementation.
 *
 * Returns zero on success, a negative error code otherwise.
 * Returns -pte_internal if @pstatus, @psize, or @filename is NULL.
 * Returns -pte_bad_image if @filename can't be opened.
 * Returns -pte_nomem if the status object can't be allocated.
 */
extern int pt_section_mk_status(void **pstatus, uint64_t *psize,
				const char *filename);

/* Perform on-map maintenance work.
 *
 * Notifies an attached image section cache about the mapping of @section.
 *
 * This function is called by the OS-specific pt_section_map() implementation
 * after @section has been successfully mapped and @section has been unlocked.
 *
 * Returns zero on success, a negative error code otherwise.
 * Returns -pte_internal if @section is NULL.
 * Returns -pte_bad_lock on any locking error.
 */
extern int pt_section_on_map_lock(struct pt_section *section);

static inline int pt_section_on_map(struct pt_section *section)
{
	if (section && !section->iscache)
		return 0;

	return pt_section_on_map_lock(section);
}

/* Map a section.
 *
 * Maps @section into memory.  Mappings are use-counted.  The number of
 * pt_section_map() calls must match the number of pt_section_unmap()
 * calls.
 *
 * This function is implemented in the OS-specific section implementation.
 *
 * Returns zero on success, a negative error code otherwise.
 * Returns -pte_internal if @section is NULL.
 * Returns -pte_bad_image if @section changed or can't be opened.
 * Returns -pte_bad_lock on any locking error.
 * Returns -pte_nomem if @section can't be mapped into memory.
 * Returns -pte_overflow if the map count would overflow.
 */
extern int pt_section_map(struct pt_section *section);

/* Share a section mapping.
 *
 * Increases the map count for @section without notifying an attached image
 * section cache.
 *
 * This function should only be used by the attached image section cache to
 * resolve a deadlock scenario when mapping a section it intends to cache.
 *
 * Returns zero on success, a negative error code otherwise.
 * Returns -pte_internal if @section is NULL.
 * Returns -pte_internal if @section->mcount is zero.
 * Returns -pte_bad_lock on any locking error.
 */
extern int pt_section_map_share(struct pt_section *section);

/* Unmap a section.
 *
 * Unmaps @section from memory.
 *
 * Returns zero on success, a negative error code otherwise.
 * Returns -pte_internal if @section is NULL.
 * Returns -pte_bad_lock on any locking error.
 * Returns -pte_internal if @section has not been mapped.
 */
extern int pt_section_unmap(struct pt_section *section);

/* Read memory from a section.
 *
 * Reads at most @size bytes from @section at @offset into @buffer.  @section
 * must be mapped.
 *
 * Returns the number of bytes read on success, a negative error code otherwise.
 * Returns -pte_internal if @section or @buffer are NULL.
 * Returns -pte_nomap if @offset is beyond the end of the section.
 */
extern int pt_section_read(const struct pt_section *section, uint8_t *buffer,
			   uint16_t size, uint64_t offset);

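/* Illustrative sketch, not part of the library: map a section, read a few
 * bytes at a section-relative @offset into a caller-provided @buffer, and
 * unmap the section again.
 */
static inline int pt_section_example_read(struct pt_section *section,
					  uint8_t *buffer, uint16_t size,
					  uint64_t offset)
{
	int status, errcode;

	errcode = pt_section_map(section);
	if (errcode < 0)
		return errcode;

	/* On success, @status is the number of bytes actually read. */
	status = pt_section_read(section, buffer, size, offset);

	/* Map and unmap calls must pair up, even if the read failed. */
	errcode = pt_section_unmap(section);
	if (errcode < 0)
		return errcode;

	return status;
}
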
#endif /* PT_SECTION_H */