/*
 * Copyright (c) 2016-2018, Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  * Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *  * Neither the name of Intel Corporation nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef PT_BLOCK_CACHE_H
#define PT_BLOCK_CACHE_H

#include "intel-pt.h"

#include <stdint.h>


/* A block cache entry qualifier.
 *
 * This describes what to do at the decision point determined by a block cache
 * entry.
 */
enum pt_bcache_qualifier {
	/* This is not a decision point.
	 *
	 * The next decision point is too far away and one or more fields
	 * threatened to overflow, so we had to stop somewhere on our way.
	 *
	 * Apply the displacement and number of instructions and continue from
	 * the resulting IP.
	 */
	ptbq_again,

	/* The decision point is a conditional branch.
	 *
	 * This requires a conditional branch query.
	 *
	 * The isize field should provide the size of the branch instruction so
	 * only taken branches require the instruction to be decoded.
	 */
	ptbq_cond,

	/* The decision point is a near indirect call.
	 *
	 * This requires a return-address stack update and an indirect branch
	 * query.
	 *
	 * The isize field should provide the size of the call instruction so
	 * the return address can be computed by adding it to the displacement
	 * that brings us to the call instruction.
	 *
	 * No instruction decode is required.
	 */
	ptbq_ind_call,

	/* The decision point is a near return.
	 *
	 * The return may be compressed, so this requires a conditional branch
	 * query to determine the compression state and either a return-address
	 * stack lookup or an indirect branch query.
	 *
	 * No instruction decode is required.
	 */
	ptbq_return,

	/* The decision point is an indirect jump or far branch.
	 *
	 * This requires an indirect branch query.
	 *
	 * No instruction decode is required.
	 */
	ptbq_indirect,

	/* The decision point requires the instruction at the decision point IP
	 * to be decoded to determine the next step.
	 *
	 * This is used for
	 *
	 *   - near direct calls that need to maintain the return-address stack.
	 *
	 *   - near direct jumps that are too far away to be handled with a
	 *     block cache entry, as they would overflow the displacement field.
	 */
	ptbq_decode
};
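
/* Illustrative sketch, not part of this header: a block decoder is expected to
 * act on the qualifier roughly as follows.  The switch below only restates the
 * descriptions above; the bce and ip variables are made up for the example,
 * while pt_bce_qualifier() is declared further down in this header.
 *
 *	switch (pt_bce_qualifier(bce)) {
 *	case ptbq_again:
 *		ip += bce.displacement;	// no decision; continue decoding
 *		break;
 *	case ptbq_cond:
 *		// conditional branch query; decode only if taken
 *		break;
 *	case ptbq_ind_call:
 *		// push the return address, then indirect branch query
 *		break;
 *	case ptbq_return:
 *		// compression query, then return stack or indirect query
 *		break;
 *	case ptbq_indirect:
 *		// indirect branch query
 *		break;
 *	case ptbq_decode:
 *		// decode the instruction at the decision point IP
 *		break;
 *	}
 */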

/* A block cache entry.
 *
 * There will be one such entry per byte of decoded memory image.  Each entry
 * corresponds to an IP in the traced memory image.  The cache is initialized
 * with invalid entries for all IPs.
 *
 * Only entries for the first byte of each instruction will be used; other
 * entries are ignored and will remain invalid.
 *
 * Each valid entry gives the distance from the entry's IP to the next decision
 * point both in bytes and in the number of instructions.
 */
struct pt_bcache_entry {
	/* The displacement to the next decision point in bytes.
	 *
	 * This is zero if we are at a decision point, except for ptbq_again,
	 * where it gives the displacement to the next block cache entry to be
	 * used.
	 */
	int32_t displacement:16;

	/* The number of instructions to the next decision point.
	 *
	 * This is typically one at a decision point since we are already
	 * accounting for the instruction at the decision point.
	 *
	 * Note that this field must be smaller than the respective struct
	 * pt_block field so we can fit one block cache entry into an empty
	 * block.
	 */
	uint32_t ninsn:8;

	/* The execution mode for all instructions between here and the next
	 * decision point.
	 *
	 * This is enum pt_exec_mode.
	 *
	 * This is ptem_unknown if the entry is not valid.
	 */
	uint32_t mode:2;

	/* The decision point qualifier.
	 *
	 * This is enum pt_bcache_qualifier.
	 */
	uint32_t qualifier:3;

	/* The size of the instruction at the decision point.
	 *
	 * This is zero if the size is too big to fit into the field.  In this
	 * case, the instruction needs to be decoded to determine its size.
	 */
	uint32_t isize:3;
};
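
/* Illustrative sketch, not part of this header: the bit-fields above add up to
 * 32 bits, so an entry is meant to be no bigger than a single 32-bit word.
 * Filling in an entry for a conditional branch that lies three instructions
 * and eleven bytes ahead might look like this; all values are made up for the
 * example.
 *
 *	struct pt_bcache_entry bce;
 *
 *	memset(&bce, 0, sizeof(bce));	// start from an invalid entry
 *	bce.displacement = 11;		// bytes to the decision point
 *	bce.ninsn = 3;			// instructions to the decision point
 *	bce.mode = ptem_64bit;		// a valid mode marks the entry valid
 *	bce.qualifier = ptbq_cond;	// conditional branch query needed
 *	bce.isize = 2;			// size of the branch instruction
 *
 * Leaving @mode at ptem_unknown (zero) would keep the entry invalid.
 */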

/* Get the execution mode of a block cache entry. */
static inline enum pt_exec_mode pt_bce_exec_mode(struct pt_bcache_entry bce)
{
	return (enum pt_exec_mode) bce.mode;
}

/* Get the block cache qualifier of a block cache entry. */
static inline enum pt_bcache_qualifier
pt_bce_qualifier(struct pt_bcache_entry bce)
{
	return (enum pt_bcache_qualifier) bce.qualifier;
}

/* Check if a block cache entry is valid. */
static inline int pt_bce_is_valid(struct pt_bcache_entry bce)
{
	return pt_bce_exec_mode(bce) != ptem_unknown;
}



/* A block cache. */
struct pt_block_cache {
	/* The number of cache entries. */
	uint32_t nentries;

	/* A variable-length array of @nentries entries. */
	struct pt_bcache_entry entry[];
};

/* Create a block cache.
 *
 * @nentries is the number of entries in the cache and should match the size of
 * the to-be-cached section in bytes.
 */
extern struct pt_block_cache *pt_bcache_alloc(uint64_t nentries);
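
/* Illustrative sketch, not part of this header: since the cache holds one
 * entry per byte of the cached section, the cache for a section of @size
 * bytes would be allocated as
 *
 *	struct pt_block_cache *bcache;
 *
 *	bcache = pt_bcache_alloc(size);
 *	if (!bcache)
 *		return -pte_nomem;
 *
 * assuming a NULL return indicates an allocation failure; @size and the error
 * code chosen here are made up for the example.
 */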

/* Destroy a block cache. */
extern void pt_bcache_free(struct pt_block_cache *bcache);

/* Cache a block.
 *
 * It is expected that all calls for the same @index write the same @bce.
 *
 * Returns zero on success, a negative error code otherwise.
 * Returns -pte_internal if @bcache is NULL.
 * Returns -pte_internal if @index is outside of @bcache.
 */
extern int pt_bcache_add(struct pt_block_cache *bcache, uint64_t index,
			 struct pt_bcache_entry bce);
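
/* Illustrative sketch, not part of this header: with one entry per section
 * byte, @index would typically be the entry's offset in bytes from the start
 * of the cached section.  Caching @bce for an instruction at @ip in a section
 * mapped at @laddr might then look like
 *
 *	int errcode;
 *
 *	errcode = pt_bcache_add(bcache, ip - laddr, bce);
 *	if (errcode < 0)
 *		return errcode;
 *
 * The @ip and @laddr names are made up for the example.
 */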

/* Look up a cached block.
 *
 * The returned cache entry need not be valid.  The caller is expected to check
 * for validity using pt_bce_is_valid(*@bce).
 *
 * Returns zero on success, a negative error code otherwise.
 * Returns -pte_internal if @bcache or @bce is NULL.
 * Returns -pte_internal if @index is outside of @bcache.
 */
extern int pt_bcache_lookup(struct pt_bcache_entry *bce,
			    const struct pt_block_cache *bcache,
			    uint64_t index);
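
/* Illustrative sketch, not part of this header: a typical lookup checks the
 * returned entry for validity before using it, falling back to instruction
 * decode for entries that have not been cached yet.  The @ip and @laddr names
 * are made up for the example.
 *
 *	struct pt_bcache_entry bce;
 *	int errcode;
 *
 *	errcode = pt_bcache_lookup(&bce, bcache, ip - laddr);
 *	if (errcode < 0)
 *		return errcode;
 *
 *	if (!pt_bce_is_valid(bce)) {
 *		// not cached, yet; decode the instruction instead
 *	}
 */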

#endif /* PT_BLOCK_CACHE_H */