/*
 * Copyright (c) 2013-2019, Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  * Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *  * Neither the name of Intel Corporation nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "pt_section.h"
#include "pt_block_cache.h"
#include "pt_image_section_cache.h"

#include "intel-pt.h"

#include <stdlib.h>
#include <stdio.h>
#include <string.h>


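/* Create a section covering @size bytes of @filename starting at @offset.
 *
 * The requested range is truncated so it lies entirely within the file; an
 * @offset beyond the end of the file fails with -pte_invalid.  The new
 * section starts with a use count of one, owned by the caller.
 */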
int pt_mk_section(struct pt_section **psection, const char *filename,
		  uint64_t offset, uint64_t size)
{
	struct pt_section *section;
	uint64_t fsize;
	size_t flen;
	void *status;
	char *fname;
	int errcode;

	if (!psection)
		return -pte_internal;

	flen = strnlen(filename, FILENAME_MAX);
	if (FILENAME_MAX <= flen)
		return -pte_invalid;

	flen += 1;

	fname = malloc(flen);
	if (!fname)
		return -pte_nomem;

	memcpy(fname, filename, flen);

	errcode = pt_section_mk_status(&status, &fsize, fname);
	if (errcode < 0)
		goto out_fname;

	/* Fail if the requested @offset lies beyond the end of @file. */
	if (fsize <= offset) {
		errcode = -pte_invalid;
		goto out_status;
	}

	/* Truncate @size so the entire range lies within @file. */
	fsize -= offset;
	if (fsize < size)
		size = fsize;

	section = malloc(sizeof(*section));
	if (!section) {
		errcode = -pte_nomem;
		goto out_status;
	}

	memset(section, 0, sizeof(*section));

	section->filename = fname;
	section->status = status;
	section->offset = offset;
	section->size = size;
	section->ucount = 1;

#if defined(FEATURE_THREADS)

	errcode = mtx_init(&section->lock, mtx_plain);
	if (errcode != thrd_success) {
		free(section);

		errcode = -pte_bad_lock;
		goto out_status;
	}

	errcode = mtx_init(&section->alock, mtx_plain);
	if (errcode != thrd_success) {
		mtx_destroy(&section->lock);
		free(section);

		errcode = -pte_bad_lock;
		goto out_status;
	}

#endif /* defined(FEATURE_THREADS) */

	*psection = section;
	return 0;

out_status:
	free(status);

out_fname:
	free(fname);
	return errcode;
}

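/* Acquire @section's general lock.  A no-op unless the library was built
 * with FEATURE_THREADS.
 */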
int pt_section_lock(struct pt_section *section)
{
	if (!section)
		return -pte_internal;

#if defined(FEATURE_THREADS)
	{
		int errcode;

		errcode = mtx_lock(&section->lock);
		if (errcode != thrd_success)
			return -pte_bad_lock;
	}
#endif /* defined(FEATURE_THREADS) */

	return 0;
}

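/* Release @section's general lock.  A no-op unless the library was built
 * with FEATURE_THREADS.
 */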
int pt_section_unlock(struct pt_section *section)
{
	if (!section)
		return -pte_internal;

#if defined(FEATURE_THREADS)
	{
		int errcode;

		errcode = mtx_unlock(&section->lock);
		if (errcode != thrd_success)
			return -pte_bad_lock;
	}
#endif /* defined(FEATURE_THREADS) */

	return 0;
}

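/* Free @section and the resources it owns.  Called from pt_section_put()
 * when the last reference is dropped.
 */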
static void pt_section_free(struct pt_section *section)
{
	if (!section)
		return;

#if defined(FEATURE_THREADS)

	mtx_destroy(&section->alock);
	mtx_destroy(&section->lock);

#endif /* defined(FEATURE_THREADS) */

	free(section->filename);
	free(section->status);
	free(section);
}

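/* Take an additional reference to @section by incrementing its use count.
 * Fails with -pte_overflow if the count would wrap.
 */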
int pt_section_get(struct pt_section *section)
{
	uint16_t ucount;
	int errcode;

	if (!section)
		return -pte_internal;

	errcode = pt_section_lock(section);
	if (errcode < 0)
		return errcode;

	ucount = section->ucount + 1;
	if (!ucount) {
		(void) pt_section_unlock(section);
		return -pte_overflow;
	}

	section->ucount = ucount;

	return pt_section_unlock(section);
}

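/* Drop a reference to @section.  The last put frees the section; by then
 * the section must no longer be mapped, so a non-zero map count is an
 * internal error.
 */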
int pt_section_put(struct pt_section *section)
{
	uint16_t ucount, mcount;
	int errcode;

	if (!section)
		return -pte_internal;

	errcode = pt_section_lock(section);
	if (errcode < 0)
		return errcode;

	mcount = section->mcount;
	ucount = section->ucount;
	if (ucount > 1) {
		section->ucount = ucount - 1;
		return pt_section_unlock(section);
	}

	errcode = pt_section_unlock(section);
	if (errcode < 0)
		return errcode;

	if (!ucount || mcount)
		return -pte_internal;

	pt_section_free(section);
	return 0;
}

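/* Acquire @section's attach lock, which guards the attachment to an image
 * section cache.  A no-op unless the library was built with FEATURE_THREADS.
 */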
static int pt_section_lock_attach(struct pt_section *section)
{
	if (!section)
		return -pte_internal;

#if defined(FEATURE_THREADS)
	{
		int errcode;

		errcode = mtx_lock(&section->alock);
		if (errcode != thrd_success)
			return -pte_bad_lock;
	}
#endif /* defined(FEATURE_THREADS) */

	return 0;
}

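/* Release @section's attach lock.  A no-op unless the library was built
 * with FEATURE_THREADS.
 */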
static int pt_section_unlock_attach(struct pt_section *section)
{
	if (!section)
		return -pte_internal;

#if defined(FEATURE_THREADS)
	{
		int errcode;

		errcode = mtx_unlock(&section->alock);
		if (errcode != thrd_success)
			return -pte_bad_lock;
	}
#endif /* defined(FEATURE_THREADS) */

	return 0;
}

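/* Attach @iscache to @section or, if it is already attached, increment the
 * attach count.  A section can be attached to at most one image section
 * cache, and the attach count must not exceed the use count.
 */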
int pt_section_attach(struct pt_section *section,
		      struct pt_image_section_cache *iscache)
{
	uint16_t acount, ucount;
	int errcode;

	if (!section || !iscache)
		return -pte_internal;

	errcode = pt_section_lock_attach(section);
	if (errcode < 0)
		return errcode;

	ucount = section->ucount;
	acount = section->acount;
	if (!acount) {
		if (section->iscache || !ucount)
			goto out_unlock;

		section->iscache = iscache;
		section->acount = 1;

		return pt_section_unlock_attach(section);
	}

	acount += 1;
	if (!acount) {
		(void) pt_section_unlock_attach(section);
		return -pte_overflow;
	}

	if (ucount < acount)
		goto out_unlock;

	if (section->iscache != iscache)
		goto out_unlock;

	section->acount = acount;

	return pt_section_unlock_attach(section);

out_unlock:
	(void) pt_section_unlock_attach(section);
	return -pte_internal;
}

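/* Detach @iscache from @section by decrementing the attach count.  The
 * last detach clears the iscache pointer.  Detaching a cache that is not
 * attached is an internal error.
 */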
int pt_section_detach(struct pt_section *section,
		      struct pt_image_section_cache *iscache)
{
	uint16_t acount, ucount;
	int errcode;

	if (!section || !iscache)
		return -pte_internal;

	errcode = pt_section_lock_attach(section);
	if (errcode < 0)
		return errcode;

	if (section->iscache != iscache)
		goto out_unlock;

	acount = section->acount;
	if (!acount)
		goto out_unlock;

	acount -= 1;
	ucount = section->ucount;
	if (ucount < acount)
		goto out_unlock;

	section->acount = acount;
	if (!acount)
		section->iscache = NULL;

	return pt_section_unlock_attach(section);

out_unlock:
	(void) pt_section_unlock_attach(section);
	return -pte_internal;
}

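/* Return @section's file name or NULL if @section is NULL. */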
const char *pt_section_filename(const struct pt_section *section)
{
	if (!section)
		return NULL;

	return section->filename;
}

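/* Return @section's size in bytes or zero if @section is NULL. */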
uint64_t pt_section_size(const struct pt_section *section)
{
	if (!section)
		return 0ull;

	return section->size;
}

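/* Provide the memory overhead of @section's block cache in @psize, or zero
 * if no block cache has been allocated.
 */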
static int pt_section_bcache_memsize(const struct pt_section *section,
				     uint64_t *psize)
{
	struct pt_block_cache *bcache;

	if (!section || !psize)
		return -pte_internal;

	bcache = section->bcache;
	if (!bcache) {
		*psize = 0ull;
		return 0;
	}

	*psize = sizeof(*bcache) +
		(bcache->nentries * sizeof(struct pt_bcache_entry));

	return 0;
}

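/* Provide @section's memory footprint in @psize: the mapped memory as
 * reported by the mapping-specific memsize callback plus the block cache
 * overhead.  Expects the caller to hold @section's lock.
 */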
static int pt_section_memsize_locked(const struct pt_section *section,
				     uint64_t *psize)
{
	uint64_t msize, bcsize;
	int (*memsize)(const struct pt_section *section, uint64_t *size);
	int errcode;

	if (!section || !psize)
		return -pte_internal;

	memsize = section->memsize;
	if (!memsize) {
		if (section->mcount)
			return -pte_internal;

		*psize = 0ull;
		return 0;
	}

	errcode = memsize(section, &msize);
	if (errcode < 0)
		return errcode;

	errcode = pt_section_bcache_memsize(section, &bcsize);
	if (errcode < 0)
		return errcode;

	*psize = msize + bcsize;

	return 0;
}

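/* Provide @section's memory footprint in @size, taking and releasing the
 * section lock around the computation.
 */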
int pt_section_memsize(struct pt_section *section, uint64_t *size)
{
	int errcode, status;

	errcode = pt_section_lock(section);
	if (errcode < 0)
		return errcode;

	status = pt_section_memsize_locked(section, size);

	errcode = pt_section_unlock(section);
	if (errcode < 0)
		return errcode;

	return status;
}

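/* Return @section's offset within its file or zero if @section is NULL. */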
uint64_t pt_section_offset(const struct pt_section *section)
{
	if (!section)
		return 0ull;

	return section->offset;
}

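/* Allocate a block cache for @section, which must be mapped.  If the new
 * cache grows the section's memory footprint, an attached image section
 * cache is notified of the resize.
 */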
int pt_section_alloc_bcache(struct pt_section *section)
{
	struct pt_image_section_cache *iscache;
	struct pt_block_cache *bcache;
	uint64_t ssize, memsize;
	uint32_t csize;
	int errcode;

	if (!section)
		return -pte_internal;

	if (!section->mcount)
		return -pte_internal;

	ssize = pt_section_size(section);
	csize = (uint32_t) ssize;

	if (csize != ssize)
		return -pte_not_supported;

	memsize = 0ull;

	/* We need to take both the attach and the section lock in order to pair
	 * the block cache allocation and the resize notification.
	 *
	 * This allows map notifications in between but they only change the
	 * order of sections in the cache.
	 *
	 * The attach lock needs to be taken first.
	 */
	errcode = pt_section_lock_attach(section);
	if (errcode < 0)
		return errcode;

	errcode = pt_section_lock(section);
	if (errcode < 0)
		goto out_alock;

	bcache = pt_section_bcache(section);
	if (bcache) {
		errcode = 0;
		goto out_lock;
	}

	bcache = pt_bcache_alloc(csize);
	if (!bcache) {
		errcode = -pte_nomem;
		goto out_lock;
	}

	/* Install the block cache.  It will become visible and may be used
	 * immediately.
	 *
	 * If we fail later on, we leave the block cache and report the error to
	 * the allocating decoder thread.
	 */
	section->bcache = bcache;

	errcode = pt_section_memsize_locked(section, &memsize);
	if (errcode < 0)
		goto out_lock;

	errcode = pt_section_unlock(section);
	if (errcode < 0)
		goto out_alock;

	if (memsize) {
		iscache = section->iscache;
		if (iscache) {
			errcode = pt_iscache_notify_resize(iscache, section,
							   memsize);
			if (errcode < 0)
				goto out_alock;
		}
	}

	return pt_section_unlock_attach(section);

out_lock:
	(void) pt_section_unlock(section);

out_alock:
	(void) pt_section_unlock_attach(section);
	return errcode;
}

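/* Notify an attached image section cache that @section has been mapped.
 * Takes the attach lock; see the comment below on re-entrance when the
 * section is unmapped and mapped again concurrently.
 */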
int pt_section_on_map_lock(struct pt_section *section)
{
	struct pt_image_section_cache *iscache;
	int errcode, status;

	if (!section)
		return -pte_internal;

	errcode = pt_section_lock_attach(section);
	if (errcode < 0)
		return errcode;

	iscache = section->iscache;
	if (!iscache)
		return pt_section_unlock_attach(section);

	/* There is a potential deadlock when @section was unmapped again and
	 * @iscache tries to map it.  This would cause this function to be
	 * re-entered while we're still holding the attach lock.
	 *
	 * This scenario is very unlikely, though, since our caller does not yet
	 * know whether pt_section_map() succeeded.
	 */
	status = pt_iscache_notify_map(iscache, section);

	errcode = pt_section_unlock_attach(section);
	if (errcode < 0)
		return errcode;

	return status;
}

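/* Add another user to an already mapped @section by incrementing its map
 * count.  Fails if @section is not currently mapped or the count would
 * wrap.
 */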
int pt_section_map_share(struct pt_section *section)
{
	uint16_t mcount;
	int errcode;

	if (!section)
		return -pte_internal;

	errcode = pt_section_lock(section);
	if (errcode < 0)
		return errcode;

	mcount = section->mcount;
	if (!mcount) {
		(void) pt_section_unlock(section);
		return -pte_internal;
	}

	mcount += 1;
	if (!mcount) {
		(void) pt_section_unlock(section);
		return -pte_overflow;
	}

	section->mcount = mcount;

	return pt_section_unlock(section);
}

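/* Unmap @section by decrementing its map count.  The last unmap calls the
 * mapping-specific unmap callback and frees the block cache.
 */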
int pt_section_unmap(struct pt_section *section)
{
	uint16_t mcount;
	int errcode, status;

	if (!section)
		return -pte_internal;

	errcode = pt_section_lock(section);
	if (errcode < 0)
		return errcode;

	mcount = section->mcount;

	errcode = -pte_nomap;
	if (!mcount)
		goto out_unlock;

	section->mcount = mcount -= 1;
	if (mcount)
		return pt_section_unlock(section);

	errcode = -pte_internal;
	if (!section->unmap)
		goto out_unlock;

	status = section->unmap(section);

	pt_bcache_free(section->bcache);
	section->bcache = NULL;

	errcode = pt_section_unlock(section);
	if (errcode < 0)
		return errcode;

	return status;

out_unlock:
	(void) pt_section_unlock(section);
	return errcode;
}

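/* Read up to @size bytes from @section at @offset into @buffer, truncating
 * the read at the end of the section.  Fails with -pte_nomap if @section
 * is not mapped or @offset lies beyond its end.
 */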
int pt_section_read(const struct pt_section *section, uint8_t *buffer,
		    uint16_t size, uint64_t offset)
{
	uint64_t limit, space;

	if (!section)
		return -pte_internal;

	if (!section->read)
		return -pte_nomap;

	limit = section->size;
	if (limit <= offset)
		return -pte_nomap;

	/* Truncate if we try to read past the end of the section. */
	space = limit - offset;
	if (space < size)
		size = (uint16_t) space;

	return section->read(section, buffer, size, offset);
}
655