/*
 * Copyright (c) 2016-2019, Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  * Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *  * Neither the name of Intel Corporation nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "pt_image_section_cache.h"
#include "pt_section.h"

#include "intel-pt.h"

#include <stdlib.h>
#include <string.h>

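/* Duplicate @str.
 *
 * The copy is silently truncated to at most 4096 characters.
 *
 * Returns a pointer to the newly allocated copy on success.
 * Returns NULL if @str is NULL or if the allocation fails.
 */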
static char *dupstr(const char *str)
{
	char *dup;
	size_t len;

	if (!str)
		return NULL;

	/* Silently truncate the name if it gets too big. */
	len = strnlen(str, 4096ul);

	dup = malloc(len + 1);
	if (!dup)
		return NULL;

	dup[len] = 0;

	return memcpy(dup, str, len);
}

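/* Initialize @iscache and give it a copy of @name, if non-NULL.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */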
int pt_iscache_init(struct pt_image_section_cache *iscache, const char *name)
{
	if (!iscache)
		return -pte_internal;

	memset(iscache, 0, sizeof(*iscache));
	iscache->limit = UINT64_MAX;
	if (name) {
		iscache->name = dupstr(name);
		if (!iscache->name)
			return -pte_nomem;
	}

#if defined(FEATURE_THREADS)
	{
		int errcode;

		errcode = mtx_init(&iscache->lock, mtx_plain);
		if (errcode != thrd_success)
			return -pte_bad_lock;
	}
#endif /* defined(FEATURE_THREADS) */

	return 0;
}

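/* Finalize @iscache.
 *
 * Clears the cache, releases all cached sections, and frees the name copy.
 */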
void pt_iscache_fini(struct pt_image_section_cache *iscache)
{
	if (!iscache)
		return;

	(void) pt_iscache_clear(iscache);
	free(iscache->name);

#if defined(FEATURE_THREADS)

	mtx_destroy(&iscache->lock);

#endif /* defined(FEATURE_THREADS) */
}

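/* Lock @iscache.
 *
 * This is a nop unless the library is compiled with FEATURE_THREADS.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */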
static inline int pt_iscache_lock(struct pt_image_section_cache *iscache)
{
	if (!iscache)
		return -pte_internal;

#if defined(FEATURE_THREADS)
	{
		int errcode;

		errcode = mtx_lock(&iscache->lock);
		if (errcode != thrd_success)
			return -pte_bad_lock;
	}
#endif /* defined(FEATURE_THREADS) */

	return 0;
}

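/* Unlock @iscache.
 *
 * This is a nop unless the library is compiled with FEATURE_THREADS.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */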
static inline int pt_iscache_unlock(struct pt_image_section_cache *iscache)
{
	if (!iscache)
		return -pte_internal;

#if defined(FEATURE_THREADS)
	{
		int errcode;

		errcode = mtx_unlock(&iscache->lock);
		if (errcode != thrd_success)
			return -pte_bad_lock;
	}
#endif /* defined(FEATURE_THREADS) */

	return 0;
}

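/* Translate a cache array @index into its image section identifier.
 *
 * Identifiers are positive; index zero maps to isid one.
 */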
static inline int isid_from_index(uint16_t index)
{
	return index + 1;
}

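/* Grow @iscache->entries by a fixed increment of eight entries.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */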
static int pt_iscache_expand(struct pt_image_section_cache *iscache)
{
	struct pt_iscache_entry *entries;
	uint16_t capacity, target;

	if (!iscache)
		return -pte_internal;

	capacity = iscache->capacity;
	target = capacity + 8;

	/* Check for overflows. */
	if (target < capacity)
		return -pte_nomem;

	entries = realloc(iscache->entries, target * sizeof(*entries));
	if (!entries)
		return -pte_nomem;

	iscache->capacity = target;
	iscache->entries = entries;
	return 0;
}

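/* Search @iscache for an exact match of @filename, @offset, @size, and @laddr.
 *
 * The caller must lock @iscache.
 *
 * Returns a positive isid if a matching entry is found.
 * Returns zero if no entry matches.
 * Returns a negative pt_error_code otherwise.
 */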
static int pt_iscache_find_locked(struct pt_image_section_cache *iscache,
				  const char *filename, uint64_t offset,
				  uint64_t size, uint64_t laddr)
{
	uint16_t idx, end;

	if (!iscache || !filename)
		return -pte_internal;

	end = iscache->size;
	for (idx = 0; idx < end; ++idx) {
		const struct pt_iscache_entry *entry;
		const struct pt_section *section;
		const char *sec_filename;
		uint64_t sec_offset, sec_size;

		entry = &iscache->entries[idx];

		/* We do not zero-initialize the array - a NULL check is
		 * pointless.
		 */
		section = entry->section;
		sec_filename = pt_section_filename(section);
		sec_offset = pt_section_offset(section);
		sec_size = pt_section_size(section);

		if (entry->laddr != laddr)
			continue;

		if (sec_offset != offset)
			continue;

		if (sec_size != size)
			continue;

		/* We should not have a section without a filename. */
		if (!sec_filename)
			return -pte_internal;

		if (strcmp(sec_filename, filename) != 0)
			continue;

		return isid_from_index(idx);
	}

	return 0;
}

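/* Free the list of LRU entries starting at @lru.
 *
 * Unmaps each entry's section before freeing the entry.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */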
static int pt_iscache_lru_free(struct pt_iscache_lru_entry *lru)
{
	while (lru) {
		struct pt_iscache_lru_entry *trash;
		int errcode;

		trash = lru;
		lru = lru->next;

		errcode = pt_section_unmap(trash->section);
		if (errcode < 0)
			return errcode;

		free(trash);
	}

	return 0;
}

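/* Detach the tail of @iscache->lru that exceeds @iscache->limit.
 *
 * The caller must lock @iscache and is expected to free the detached tail
 * returned in @tail, e.g. via pt_iscache_lru_free().
 *
 * This function must only be called when the cache actually needs pruning.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */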
static int pt_iscache_lru_prune(struct pt_image_section_cache *iscache,
				struct pt_iscache_lru_entry **tail)
{
	struct pt_iscache_lru_entry *lru, **pnext;
	uint64_t limit, used;

	if (!iscache || !tail)
		return -pte_internal;

	limit = iscache->limit;
	used = 0ull;

	pnext = &iscache->lru;
	for (lru = *pnext; lru; pnext = &lru->next, lru = *pnext) {

		used += lru->size;
		if (used <= limit)
			continue;

		/* The cache got too big; prune it starting from @lru. */
		iscache->used = used - lru->size;
		*pnext = NULL;
		*tail = lru;

		return 0;
	}

	/* We shouldn't prune the cache unnecessarily. */
	return -pte_internal;
}

/* Add @section to the front of @iscache->lru.
 *
 * Returns a positive integer if we need to prune the cache.
 * Returns zero if we don't need to prune the cache.
 * Returns a negative pt_error_code otherwise.
 */
static int pt_isache_lru_new(struct pt_image_section_cache *iscache,
			     struct pt_section *section)
{
	struct pt_iscache_lru_entry *lru;
	uint64_t memsize, used, total, limit;
	int errcode;

	if (!iscache)
		return -pte_internal;

	errcode = pt_section_memsize(section, &memsize);
	if (errcode < 0)
		return errcode;

	/* Don't try to add the section if it is too big.  We'd prune it again
	 * together with all other sections in our cache.
	 */
	limit = iscache->limit;
	if (limit < memsize)
		return 0;

	errcode = pt_section_map_share(section);
	if (errcode < 0)
		return errcode;

	lru = malloc(sizeof(*lru));
	if (!lru) {
		(void) pt_section_unmap(section);
		return -pte_nomem;
	}

	lru->section = section;
	lru->size = memsize;

	lru->next = iscache->lru;
	iscache->lru = lru;

	used = iscache->used;
	total = used + memsize;
	if (total < used || total < memsize)
		return -pte_overflow;

	iscache->used = total;

	return (limit < total) ? 1 : 0;
}

/* Add or move @section to the front of @iscache->lru.
 *
 * Returns a positive integer if we need to prune the cache.
 * Returns zero if we don't need to prune the cache.
 * Returns a negative pt_error_code otherwise.
 */
static int pt_iscache_lru_add(struct pt_image_section_cache *iscache,
			      struct pt_section *section)
{
	struct pt_iscache_lru_entry *lru, **pnext;

	if (!iscache)
		return -pte_internal;

	pnext = &iscache->lru;
	for (lru = *pnext; lru; pnext = &lru->next, lru = *pnext) {

		if (lru->section != section)
			continue;

		/* We found it in the cache.  Move it to the front. */
		*pnext = lru->next;
		lru->next = iscache->lru;
		iscache->lru = lru;

		return 0;
	}

	/* We didn't find it in the cache.  Add it. */
	return pt_isache_lru_new(iscache, section);
}


/* Remove @section from @iscache->lru.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */
static int pt_iscache_lru_remove(struct pt_image_section_cache *iscache,
				 const struct pt_section *section)
{
	struct pt_iscache_lru_entry *lru, **pnext;

	if (!iscache)
		return -pte_internal;

	pnext = &iscache->lru;
	for (lru = *pnext; lru; pnext = &lru->next, lru = *pnext) {

		if (lru->section != section)
			continue;

		/* We found it in the cache.  Remove it. */
		*pnext = lru->next;
		lru->next = NULL;
		break;
	}

	return pt_iscache_lru_free(lru);
}


/* Add or move @section to the front of @iscache->lru and update its size.
 *
 * Returns a positive integer if we need to prune the cache.
 * Returns zero if we don't need to prune the cache.
 * Returns a negative pt_error_code otherwise.
 */
static int pt_iscache_lru_resize(struct pt_image_section_cache *iscache,
				 struct pt_section *section, uint64_t memsize)
{
	struct pt_iscache_lru_entry *lru;
	uint64_t oldsize, used;
	int status;

	if (!iscache)
		return -pte_internal;

	status = pt_iscache_lru_add(iscache, section);
	if (status < 0)
		return status;

	lru = iscache->lru;
	if (!lru) {
		if (status)
			return -pte_internal;
		return 0;
	}

	/* If @section is cached, it must be first.
	 *
	 * We may choose not to cache it, though, e.g. if it is too big.
	 */
	if (lru->section != section) {
		if (iscache->limit < memsize)
			return 0;

		return -pte_internal;
	}

	oldsize = lru->size;
	lru->size = memsize;

	/* If we need to prune anyway, we're done. */
	if (status)
		return status;

	used = iscache->used;
	used -= oldsize;
	used += memsize;

	iscache->used = used;

	return (iscache->limit < used) ? 1 : 0;
}

/* Clear @iscache->lru.
 *
 * Unlike other iscache_lru functions, the caller does not lock @iscache.
 *
 * Return zero on success, a negative pt_error_code otherwise.
 */
static int pt_iscache_lru_clear(struct pt_image_section_cache *iscache)
{
	struct pt_iscache_lru_entry *lru;
	int errcode;

	errcode = pt_iscache_lock(iscache);
	if (errcode < 0)
		return errcode;

	lru = iscache->lru;
	iscache->lru = NULL;
	iscache->used = 0ull;

	errcode = pt_iscache_unlock(iscache);
	if (errcode < 0)
		return errcode;

	return pt_iscache_lru_free(lru);
}

/* Search @iscache for a partial or exact match of the section given by
 * @filename, @offset, and @size, loaded at @laddr, and return the
 * corresponding index or @iscache->size if no match is found.
 *
 * The caller must lock @iscache.
 *
 * Returns a non-negative index on success, a negative pt_error_code otherwise.
 */
static int
pt_iscache_find_section_locked(const struct pt_image_section_cache *iscache,
			       const char *filename, uint64_t offset,
			       uint64_t size, uint64_t laddr)
{
	const struct pt_section *section;
	uint16_t idx, end;
	int match;

	if (!iscache || !filename)
		return -pte_internal;

	section = NULL;
	match = end = iscache->size;
	for (idx = 0; idx < end; ++idx) {
		const struct pt_iscache_entry *entry;
		const struct pt_section *sec;

		entry = &iscache->entries[idx];

		/* We do not zero-initialize the array - a NULL check is
		 * pointless.
		 */
		sec = entry->section;

		/* Avoid redundant match checks. */
		if (sec != section) {
			const char *sec_filename;

			/* We don't have duplicates.  Skip the check. */
			if (section)
				continue;

			if (offset != pt_section_offset(sec))
				continue;

			if (size != pt_section_size(sec))
				continue;

			sec_filename = pt_section_filename(sec);
			if (!sec_filename)
				return -pte_internal;

			if (strcmp(filename, sec_filename) != 0)
				continue;

			/* Use the cached section instead. */
			section = sec;
			match = idx;
		}

		/* If we didn't continue, @section == @sec and we have a match.
		 *
		 * If we also find a matching load address, we're done.
		 */
		if (laddr == entry->laddr)
			return idx;
	}

	return match;
}

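/* Add @section, loaded at @laddr, to @iscache.
 *
 * If a matching section is already cached, the existing entry is shared
 * instead of adding a duplicate.
 *
 * On success, @iscache holds a reference to the added (or shared) section
 * and is attached to it.
 *
 * Returns a positive isid on success, a negative pt_error_code otherwise.
 */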
int pt_iscache_add(struct pt_image_section_cache *iscache,
		   struct pt_section *section, uint64_t laddr)
{
	const char *filename;
	uint64_t offset, size;
	uint16_t idx;
	int errcode;

	if (!iscache || !section)
		return -pte_internal;

	/* We must have a filename for @section. */
	filename = pt_section_filename(section);
	if (!filename)
		return -pte_internal;

	offset = pt_section_offset(section);
	size = pt_section_size(section);

	/* Adding a section is slightly complicated by a potential deadlock
	 * scenario:
	 *
	 *   - in order to add a section, we need to attach to it, which
	 *     requires taking the section's attach lock.
	 *
	 *   - if we are already attached to it, we may receive on-map
	 *     notifications, which will be sent while holding the attach lock
	 *     and require taking the iscache lock.
	 *
	 * Hence we can't attach to a section while holding the iscache lock.
	 *
	 *
	 * We therefore attach to @section first and then lock @iscache.
	 *
	 * This opens a small window where an existing @section may be removed
	 * from @iscache and replaced by a new matching section.  We would want
	 * to share that new section rather than adding a duplicate @section.
	 *
	 * After locking @iscache, we therefore check for existing matching
	 * sections and, if one is found, update @section.  This involves
	 * detaching from @section and attaching to the existing section.
	 *
	 * And for this, we will have to temporarily unlock @iscache again.
	 */
	errcode = pt_section_get(section);
	if (errcode < 0)
		return errcode;

	errcode = pt_section_attach(section, iscache);
	if (errcode < 0)
		goto out_put;

	errcode = pt_iscache_lock(iscache);
	if (errcode < 0)
		goto out_detach;

	/* We may need to repeat this step.
	 *
	 * Typically we don't and this takes only a single iteration.  One
	 * scenario where we do repeat this is when adding a section with an
	 * out-of-bounds size.
	 *
	 * We will not find a matching section in pt_iscache_add_file() so we
	 * create a new section.  This will have its size reduced to match the
	 * actual file size.
	 *
	 * For this reduced size, we may now find an existing section, and we
	 * will take another trip in the below loop.
	 */
	for (;;) {
		const struct pt_iscache_entry *entry;
		struct pt_section *sec;
		int match;

		/* Find an existing section matching @section that we'd share
		 * rather than adding @section.
		 */
		match = pt_iscache_find_section_locked(iscache, filename,
						       offset, size, laddr);
		if (match < 0) {
			errcode = match;
			goto out_unlock_detach;
		}

		/* We're done if we have not found a matching section. */
		if (iscache->size <= match)
			break;

		entry = &iscache->entries[match];

		/* We're also done if we found the same section again.
		 *
		 * We further check for a perfect match.  In that case, we don't
		 * need to insert anything, at all.
		 */
		sec = entry->section;
		if (sec == section) {
			if (entry->laddr == laddr) {
				errcode = pt_iscache_unlock(iscache);
				if (errcode < 0)
					goto out_detach;

				errcode = pt_section_detach(section, iscache);
				if (errcode < 0)
					goto out_lru;

				errcode = pt_section_put(section);
				if (errcode < 0)
					return errcode;

				return isid_from_index((uint16_t) match);
			}

			break;
		}

		/* We update @section to share the existing @sec.
		 *
		 * This requires detaching from @section, which, in turn,
		 * requires temporarily unlocking @iscache.
		 *
		 * We further need to remove @section from @iscache->lru.
		 */
		errcode = pt_section_get(sec);
		if (errcode < 0)
			goto out_unlock_detach;

		errcode = pt_iscache_unlock(iscache);
		if (errcode < 0) {
			(void) pt_section_put(sec);
			goto out_detach;
		}

		errcode = pt_section_detach(section, iscache);
		if (errcode < 0) {
			(void) pt_section_put(sec);
			goto out_lru;
		}

		errcode = pt_section_attach(sec, iscache);
		if (errcode < 0) {
			(void) pt_section_put(sec);
			goto out_lru;
		}

		errcode = pt_iscache_lock(iscache);
		if (errcode < 0) {
			(void) pt_section_put(section);
			/* Complete the swap for cleanup. */
			section = sec;
			goto out_detach;
		}

		/* We may have received on-map notifications for @section and we
		 * may have added @section to @iscache->lru.
		 *
		 * Since we're still holding a reference to it, no harm has been
		 * done.  But we need to remove it before we drop our reference.
		 */
		errcode = pt_iscache_lru_remove(iscache, section);
		if (errcode < 0) {
			(void) pt_section_put(section);
			/* Complete the swap for cleanup. */
			section = sec;
			goto out_unlock_detach;
		}

		/* Drop the reference to @section. */
		errcode = pt_section_put(section);
		if (errcode < 0) {
			/* Complete the swap for cleanup. */
			section = sec;
			goto out_unlock_detach;
		}

		/* Swap sections.
		 *
		 * We will try again in the next iteration.
		 */
		section = sec;
	}

	/* Expand the cache, if necessary. */
	if (iscache->capacity <= iscache->size) {
		/* We must never exceed the capacity. */
		if (iscache->capacity < iscache->size) {
			errcode = -pte_internal;
			goto out_unlock_detach;
		}

		errcode = pt_iscache_expand(iscache);
		if (errcode < 0)
			goto out_unlock_detach;

		/* Make sure it is big enough, now. */
		if (iscache->capacity <= iscache->size) {
			errcode = -pte_internal;
			goto out_unlock_detach;
		}
	}

	/* Insert a new entry for @section at @laddr.
	 *
	 * This hands both attach and reference over to @iscache.  We will
	 * detach and drop the reference again when the entry is removed.
	 */
	idx = iscache->size++;

	iscache->entries[idx].section = section;
	iscache->entries[idx].laddr = laddr;

	errcode = pt_iscache_unlock(iscache);
	if (errcode < 0)
		return errcode;

	return isid_from_index(idx);

 out_unlock_detach:
	(void) pt_iscache_unlock(iscache);

 out_detach:
	(void) pt_section_detach(section, iscache);

 out_lru:
	(void) pt_iscache_lru_clear(iscache);

 out_put:
	(void) pt_section_put(section);

	return errcode;
}

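/* Find a cached section matching @filename, @offset, @size, and @laddr.
 *
 * Returns a positive isid if a matching entry is found.
 * Returns zero if no entry matches.
 * Returns a negative pt_error_code otherwise.
 */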
int pt_iscache_find(struct pt_image_section_cache *iscache,
		    const char *filename, uint64_t offset, uint64_t size,
		    uint64_t laddr)
{
	int errcode, isid;

	errcode = pt_iscache_lock(iscache);
	if (errcode < 0)
		return errcode;

	isid = pt_iscache_find_locked(iscache, filename, offset, size, laddr);

	errcode = pt_iscache_unlock(iscache);
	if (errcode < 0)
		return errcode;

	return isid;
}

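/* Look up the section identified by @isid.
 *
 * On success, provides the section in @section and its load address in
 * @laddr, and grabs a reference to the section that the caller must drop
 * via pt_section_put().
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */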
int pt_iscache_lookup(struct pt_image_section_cache *iscache,
		      struct pt_section **section, uint64_t *laddr, int isid)
{
	uint16_t index;
	int errcode, status;

	if (!iscache || !section || !laddr)
		return -pte_internal;

	if (isid <= 0)
		return -pte_bad_image;

	isid -= 1;
	if (isid > UINT16_MAX)
		return -pte_internal;

	index = (uint16_t) isid;

	errcode = pt_iscache_lock(iscache);
	if (errcode < 0)
		return errcode;

	if (iscache->size <= index)
		status = -pte_bad_image;
	else {
		const struct pt_iscache_entry *entry;

		entry = &iscache->entries[index];
		*section = entry->section;
		*laddr = entry->laddr;

		status = pt_section_get(*section);
	}

	errcode = pt_iscache_unlock(iscache);
	if (errcode < 0)
		return errcode;

	return status;
}

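/* Remove all entries from @iscache.
 *
 * Frees the LRU list and detaches from and puts every cached section.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */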
int pt_iscache_clear(struct pt_image_section_cache *iscache)
{
	struct pt_iscache_lru_entry *lru;
	struct pt_iscache_entry *entries;
	uint16_t idx, end;
	int errcode;

	if (!iscache)
		return -pte_internal;

	errcode = pt_iscache_lock(iscache);
	if (errcode < 0)
		return errcode;

	entries = iscache->entries;
	end = iscache->size;
	lru = iscache->lru;

	iscache->entries = NULL;
	iscache->capacity = 0;
	iscache->size = 0;
	iscache->lru = NULL;
	iscache->used = 0ull;

	errcode = pt_iscache_unlock(iscache);
	if (errcode < 0)
		return errcode;

	errcode = pt_iscache_lru_free(lru);
	if (errcode < 0)
		return errcode;

	for (idx = 0; idx < end; ++idx) {
		struct pt_section *section;

		section = entries[idx].section;

		/* We do not zero-initialize the array - a NULL check is
		 * pointless.
		 */
		errcode = pt_section_detach(section, iscache);
		if (errcode < 0)
			return errcode;

		errcode = pt_section_put(section);
		if (errcode < 0)
			return errcode;
	}

	free(entries);
	return 0;
}

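/* Allocate and initialize a new section cache, optionally named @name.
 *
 * Returns a pointer to the new cache on success, NULL if the allocation
 * fails.
 */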
struct pt_image_section_cache *pt_iscache_alloc(const char *name)
{
	struct pt_image_section_cache *iscache;

	iscache = malloc(sizeof(*iscache));
	if (iscache)
		pt_iscache_init(iscache, name);

	return iscache;
}

void pt_iscache_free(struct pt_image_section_cache *iscache)
{
	if (!iscache)
		return;

	pt_iscache_fini(iscache);
	free(iscache);
}

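/* Set the cache limit of @iscache to @limit bytes.
 *
 * Sections are pruned from the least-recently-used list if the new limit is
 * exceeded.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */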
int pt_iscache_set_limit(struct pt_image_section_cache *iscache, uint64_t limit)
{
	struct pt_iscache_lru_entry *tail;
	int errcode, status;

	if (!iscache)
		return -pte_invalid;

	status = 0;
	tail = NULL;

	errcode = pt_iscache_lock(iscache);
	if (errcode < 0)
		return errcode;

	iscache->limit = limit;
	if (limit < iscache->used)
		status = pt_iscache_lru_prune(iscache, &tail);

	errcode = pt_iscache_unlock(iscache);

	if (errcode < 0 || status < 0)
		return (status < 0) ? status : errcode;

	return pt_iscache_lru_free(tail);
}

const char *pt_iscache_name(const struct pt_image_section_cache *iscache)
{
	if (!iscache)
		return NULL;

	return iscache->name;
}

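/* Add the file section defined by @filename, @offset, and @size, loaded at
 * @vaddr, to @iscache.
 *
 * Shares an existing matching section if one is cached; otherwise creates a
 * new section and adds it.
 *
 * Returns a positive isid on success, a negative pt_error_code otherwise.
 */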
int pt_iscache_add_file(struct pt_image_section_cache *iscache,
			const char *filename, uint64_t offset, uint64_t size,
			uint64_t vaddr)
{
	struct pt_section *section;
	int errcode, match, isid;

	if (!iscache || !filename)
		return -pte_invalid;

	errcode = pt_iscache_lock(iscache);
	if (errcode < 0)
		return errcode;

	match = pt_iscache_find_section_locked(iscache, filename, offset,
					       size, vaddr);
	if (match < 0) {
		(void) pt_iscache_unlock(iscache);
		return match;
	}

	/* If we found a perfect match, we will share the existing entry.
	 *
	 * If we found a section, we need to grab a reference before we unlock.
	 *
	 * If we didn't find a matching section, we create a new section, which
	 * implicitly gives us a reference to it.
	 */
	if (match < iscache->size) {
		const struct pt_iscache_entry *entry;

		entry = &iscache->entries[match];
		if (entry->laddr == vaddr) {
			errcode = pt_iscache_unlock(iscache);
			if (errcode < 0)
				return errcode;

			return isid_from_index((uint16_t) match);
		}

		section = entry->section;

		errcode = pt_section_get(section);
		if (errcode < 0) {
			(void) pt_iscache_unlock(iscache);
			return errcode;
		}

		errcode = pt_iscache_unlock(iscache);
		if (errcode < 0) {
			(void) pt_section_put(section);
			return errcode;
		}
	} else {
		errcode = pt_iscache_unlock(iscache);
		if (errcode < 0)
			return errcode;

		section = NULL;
		errcode = pt_mk_section(&section, filename, offset, size);
		if (errcode < 0)
			return errcode;
	}

	/* We unlocked @iscache and hold a reference to @section. */
	isid = pt_iscache_add(iscache, section, vaddr);

	/* We grab a reference when we add the section.  Drop the one we
	 * obtained before.
	 */
	errcode = pt_section_put(section);
	if (errcode < 0)
		return errcode;

	return isid;
}


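/* Read up to @size bytes of memory at @vaddr from the section identified by
 * @isid into @buffer.
 *
 * Reads are truncated to at most UINT16_MAX bytes; the caller is expected to
 * issue further reads for the remaining part.
 *
 * Returns the number of bytes read on success, a negative pt_error_code
 * otherwise.
 *
 * A minimal usage sketch (illustrative only; the file name, addresses, and
 * sizes below are made-up examples, not part of this library):
 *
 *   struct pt_image_section_cache *iscache;
 *   uint8_t buffer[16];
 *   int isid, status;
 *
 *   iscache = pt_iscache_alloc("example");
 *   if (!iscache)
 *           return -pte_nomem;
 *
 *   isid = pt_iscache_add_file(iscache, "/path/to/binary", 0x0ull,
 *                              0x1000ull, 0x400000ull);
 *   if (isid < 0)
 *           goto out;
 *
 *   status = pt_iscache_read(iscache, buffer, sizeof(buffer), isid,
 *                            0x400010ull);
 *
 * out:
 *   pt_iscache_free(iscache);
 */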
int pt_iscache_read(struct pt_image_section_cache *iscache, uint8_t *buffer,
		    uint64_t size, int isid, uint64_t vaddr)
{
	struct pt_section *section;
	uint64_t laddr;
	int errcode, status;

	if (!iscache || !buffer || !size)
		return -pte_invalid;

	errcode = pt_iscache_lookup(iscache, &section, &laddr, isid);
	if (errcode < 0)
		return errcode;

	if (vaddr < laddr) {
		(void) pt_section_put(section);
		return -pte_nomap;
	}

	vaddr -= laddr;

	errcode = pt_section_map(section);
	if (errcode < 0) {
		(void) pt_section_put(section);
		return errcode;
	}

	/* We truncate the read if it gets too big.  The user is expected to
	 * issue further reads for the remaining part.
	 */
	if (UINT16_MAX < size)
		size = UINT16_MAX;

	status = pt_section_read(section, buffer, (uint16_t) size, vaddr);

	errcode = pt_section_unmap(section);
	if (errcode < 0) {
		(void) pt_section_put(section);
		return errcode;
	}

	errcode = pt_section_put(section);
	if (errcode < 0)
		return errcode;

	return status;
}

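/* Handle an on-map notification for @section.
 *
 * Adds or moves @section to the front of @iscache->lru and prunes the cache
 * if it exceeds its limit.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */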
int pt_iscache_notify_map(struct pt_image_section_cache *iscache,
			  struct pt_section *section)
{
	struct pt_iscache_lru_entry *tail;
	int errcode, status;

	tail = NULL;

	errcode = pt_iscache_lock(iscache);
	if (errcode < 0)
		return errcode;

	status = pt_iscache_lru_add(iscache, section);
	if (status > 0)
		status = pt_iscache_lru_prune(iscache, &tail);

	errcode = pt_iscache_unlock(iscache);

	if (errcode < 0 || status < 0)
		return (status < 0) ? status : errcode;

	return pt_iscache_lru_free(tail);
}

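/* Handle an on-resize notification for @section, whose cached memory size
 * changed to @memsize.
 *
 * Updates @section's entry in @iscache->lru and prunes the cache if it
 * exceeds its limit.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */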
int pt_iscache_notify_resize(struct pt_image_section_cache *iscache,
			     struct pt_section *section, uint64_t memsize)
{
	struct pt_iscache_lru_entry *tail;
	int errcode, status;

	tail = NULL;

	errcode = pt_iscache_lock(iscache);
	if (errcode < 0)
		return errcode;

	status = pt_iscache_lru_resize(iscache, section, memsize);
	if (status > 0)
		status = pt_iscache_lru_prune(iscache, &tail);

	errcode = pt_iscache_unlock(iscache);

	if (errcode < 0 || status < 0)
		return (status < 0) ? status : errcode;

	return pt_iscache_lru_free(tail);
}
1097