/*
 * Copyright (c) 2013-2018, Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  * Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *  * Neither the name of Intel Corporation nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "pt_section.h"
#include "pt_block_cache.h"
#include "pt_image_section_cache.h"

#include "intel-pt.h"

#include <stdlib.h>
#include <stdio.h>
#include <string.h>


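/* Duplicate @str into a newly malloc'd buffer.
 *
 * Returns the copy on success, NULL if @str is NULL or if the allocation
 * fails.  The caller is responsible for freeing the copy.
 */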
static char *dupstr(const char *str)
{
	char *dup;
	size_t len;

	if (!str)
		return NULL;

	len = strlen(str);
	dup = malloc(len + 1);
	if (!dup)
		return NULL;

	return strcpy(dup, str);
}

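/* Create a section covering @size bytes of @filename, starting at @offset.
 *
 * The section is created with a use count of one; drop it again with
 * pt_section_put().  Creation fails if the file cannot be opened or if
 * @offset lies beyond the end of the file; @size is truncated so the entire
 * range lies within the file.
 *
 * A minimal usage sketch (pt_section_map() is provided by the OS-specific
 * part of this module, e.g. posix/pt_section_posix.c):
 *
 *	struct pt_section *section;
 *
 *	section = pt_mk_section("a.out", 0x1000ull, 0x400ull);
 *	if (section) {
 *		if (pt_section_map(section) == 0) {
 *			...
 *			(void) pt_section_unmap(section);
 *		}
 *		(void) pt_section_put(section);
 *	}
 */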
struct pt_section *pt_mk_section(const char *filename, uint64_t offset,
				 uint64_t size)
{
	struct pt_section *section;
	uint64_t fsize;
	void *status;
	int errcode;

	errcode = pt_section_mk_status(&status, &fsize, filename);
	if (errcode < 0)
		return NULL;

	/* Fail if the requested @offset lies beyond the end of @file. */
	if (fsize <= offset)
		goto out_status;

	/* Truncate @size so the entire range lies within @file. */
	fsize -= offset;
	if (fsize < size)
		size = fsize;

	section = malloc(sizeof(*section));
	if (!section)
		goto out_status;

	memset(section, 0, sizeof(*section));

	section->filename = dupstr(filename);
	section->status = status;
	section->offset = offset;
	section->size = size;
	section->ucount = 1;

#if defined(FEATURE_THREADS)

	errcode = mtx_init(&section->lock, mtx_plain);
	if (errcode != thrd_success) {
		free(section->filename);
		free(section);
		goto out_status;
	}

	errcode = mtx_init(&section->alock, mtx_plain);
	if (errcode != thrd_success) {
		mtx_destroy(&section->lock);
		free(section->filename);
		free(section);
		goto out_status;
	}

#endif /* defined(FEATURE_THREADS) */

	return section;

out_status:
	free(status);
	return NULL;
}

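/* Acquire @section's lock, serializing accesses to its mutable state.
 *
 * A no-op unless FEATURE_THREADS is defined.  Returns zero on success,
 * -pte_internal if @section is NULL, -pte_bad_lock if locking fails.
 */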
int pt_section_lock(struct pt_section *section)
{
	if (!section)
		return -pte_internal;

#if defined(FEATURE_THREADS)
	{
		int errcode;

		errcode = mtx_lock(&section->lock);
		if (errcode != thrd_success)
			return -pte_bad_lock;
	}
#endif /* defined(FEATURE_THREADS) */

	return 0;
}

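/* Release @section's lock.  The counterpart to pt_section_lock(). */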
int pt_section_unlock(struct pt_section *section)
{
	if (!section)
		return -pte_internal;

#if defined(FEATURE_THREADS)
	{
		int errcode;

		errcode = mtx_unlock(&section->lock);
		if (errcode != thrd_success)
			return -pte_bad_lock;
	}
#endif /* defined(FEATURE_THREADS) */

	return 0;
}

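/* Free @section including its filename copy and status object.
 *
 * Called internally once the last use has been put; @section must no longer
 * be locked or referenced.
 */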
static void pt_section_free(struct pt_section *section)
{
	if (!section)
		return;

#if defined(FEATURE_THREADS)

	mtx_destroy(&section->alock);
	mtx_destroy(&section->lock);

#endif /* defined(FEATURE_THREADS) */

	free(section->filename);
	free(section->status);
	free(section);
}

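/* Add another user to @section.
 *
 * Returns zero on success, -pte_internal if @section is NULL, -pte_overflow
 * if the 16-bit use count would wrap around.
 */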
int pt_section_get(struct pt_section *section)
{
	uint16_t ucount;
	int errcode;

	if (!section)
		return -pte_internal;

	errcode = pt_section_lock(section);
	if (errcode < 0)
		return errcode;

	ucount = section->ucount + 1;
	if (!ucount) {
		(void) pt_section_unlock(section);
		return -pte_overflow;
	}

	section->ucount = ucount;

	return pt_section_unlock(section);
}

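/* Drop a user of @section; the section is freed when its last use is put.
 *
 * Returns zero on success, -pte_internal if @section is NULL, if the use
 * count is already zero, or if the section is still mapped.
 */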
int pt_section_put(struct pt_section *section)
{
	uint16_t ucount, mcount;
	int errcode;

	if (!section)
		return -pte_internal;

	errcode = pt_section_lock(section);
	if (errcode < 0)
		return errcode;

	mcount = section->mcount;
	ucount = section->ucount;
	if (ucount > 1) {
		section->ucount = ucount - 1;
		return pt_section_unlock(section);
	}

	errcode = pt_section_unlock(section);
	if (errcode < 0)
		return errcode;

	if (!ucount || mcount)
		return -pte_internal;

	pt_section_free(section);
	return 0;
}

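/* Acquire @section's attach lock, serializing attach, detach, and iscache
 * notifications.  Released again by pt_section_unlock_attach().
 */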
static int pt_section_lock_attach(struct pt_section *section)
{
	if (!section)
		return -pte_internal;

#if defined(FEATURE_THREADS)
	{
		int errcode;

		errcode = mtx_lock(&section->alock);
		if (errcode != thrd_success)
			return -pte_bad_lock;
	}
#endif /* defined(FEATURE_THREADS) */

	return 0;
}

static int pt_section_unlock_attach(struct pt_section *section)
{
	if (!section)
		return -pte_internal;

#if defined(FEATURE_THREADS)
	{
		int errcode;

		errcode = mtx_unlock(&section->alock);
		if (errcode != thrd_success)
			return -pte_bad_lock;
	}
#endif /* defined(FEATURE_THREADS) */

	return 0;
}

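/* Attach @iscache to @section.
 *
 * A section can be attached to at most one image section cache at a time.
 * Every attached user also holds a use, so the attach count must never
 * exceed the use count.
 *
 * Returns zero on success, -pte_overflow if the 16-bit attach count would
 * wrap around, -pte_internal if the above invariants would be violated.
 */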
int pt_section_attach(struct pt_section *section,
		      struct pt_image_section_cache *iscache)
{
	uint16_t acount, ucount;
	int errcode;

	if (!section || !iscache)
		return -pte_internal;

	errcode = pt_section_lock_attach(section);
	if (errcode < 0)
		return errcode;

	ucount = section->ucount;
	acount = section->acount;
	if (!acount) {
		if (section->iscache || !ucount)
			goto out_unlock;

		section->iscache = iscache;
		section->acount = 1;

		return pt_section_unlock_attach(section);
	}

	acount += 1;
	if (!acount) {
		(void) pt_section_unlock_attach(section);
		return -pte_overflow;
	}

	if (ucount < acount)
		goto out_unlock;

	if (section->iscache != iscache)
		goto out_unlock;

	section->acount = acount;

	return pt_section_unlock_attach(section);

 out_unlock:
	(void) pt_section_unlock_attach(section);
	return -pte_internal;
}

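/* Detach @iscache from @section, reversing pt_section_attach().
 *
 * Returns zero on success, -pte_internal if @iscache is not attached or if
 * the attach and use counts would get out of sync.
 */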
int pt_section_detach(struct pt_section *section,
		      struct pt_image_section_cache *iscache)
{
	uint16_t acount, ucount;
	int errcode;

	if (!section || !iscache)
		return -pte_internal;

	errcode = pt_section_lock_attach(section);
	if (errcode < 0)
		return errcode;

	if (section->iscache != iscache)
		goto out_unlock;

	acount = section->acount;
	if (!acount)
		goto out_unlock;

	acount -= 1;
	ucount = section->ucount;
	if (ucount < acount)
		goto out_unlock;

	section->acount = acount;
	if (!acount)
		section->iscache = NULL;

	return pt_section_unlock_attach(section);

 out_unlock:
	(void) pt_section_unlock_attach(section);
	return -pte_internal;
}

const char *pt_section_filename(const struct pt_section *section)
{
	if (!section)
		return NULL;

	return section->filename;
}

uint64_t pt_section_size(const struct pt_section *section)
{
	if (!section)
		return 0ull;

	return section->size;
}

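/* Compute the memory overhead of @section's block cache in bytes; zero if no
 * block cache has been allocated.
 */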
static int pt_section_bcache_memsize(const struct pt_section *section,
				     uint64_t *psize)
{
	struct pt_block_cache *bcache;

	if (!section || !psize)
		return -pte_internal;

	bcache = section->bcache;
	if (!bcache) {
		*psize = 0ull;
		return 0;
	}

	*psize = sizeof(*bcache) +
		(bcache->nentries * sizeof(struct pt_bcache_entry));

	return 0;
}

static int pt_section_memsize_locked(const struct pt_section *section,
				     uint64_t *psize)
{
	uint64_t msize, bcsize;
	int (*memsize)(const struct pt_section *section, uint64_t *size);
	int errcode;

	if (!section || !psize)
		return -pte_internal;

	memsize = section->memsize;
	if (!memsize) {
		if (section->mcount)
			return -pte_internal;

		*psize = 0ull;
		return 0;
	}

	errcode = memsize(section, &msize);
	if (errcode < 0)
		return errcode;

	errcode = pt_section_bcache_memsize(section, &bcsize);
	if (errcode < 0)
		return errcode;

	*psize = msize + bcsize;

	return 0;
}

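/* Compute @section's total memory overhead, i.e. the memory used by the file
 * mapping plus the block cache, while holding the section lock.
 */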
int pt_section_memsize(struct pt_section *section, uint64_t *size)
{
	int errcode, status;

	errcode = pt_section_lock(section);
	if (errcode < 0)
		return errcode;

	status = pt_section_memsize_locked(section, size);

	errcode = pt_section_unlock(section);
	if (errcode < 0)
		return errcode;

	return status;
}

uint64_t pt_section_offset(const struct pt_section *section)
{
	if (!section)
		return 0ull;

	return section->offset;
}

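/* Allocate and install @section's block cache and notify an attached image
 * section cache, if any, about the size change.
 *
 * The section must be mapped.  Returns zero on success (also if the cache
 * had already been allocated), -pte_nomem if the allocation fails,
 * -pte_not_supported if the section is too big for a 32-bit cache index.
 */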
int pt_section_alloc_bcache(struct pt_section *section)
{
	struct pt_image_section_cache *iscache;
	struct pt_block_cache *bcache;
	uint64_t ssize, memsize;
	uint32_t csize;
	int errcode;

	if (!section)
		return -pte_internal;

	if (!section->mcount)
		return -pte_internal;

	ssize = pt_section_size(section);
	csize = (uint32_t) ssize;

	if (csize != ssize)
		return -pte_not_supported;

	memsize = 0ull;

	/* We need to take both the attach and the section lock in order to pair
	 * the block cache allocation and the resize notification.
	 *
	 * This allows map notifications in between but they only change the
	 * order of sections in the cache.
	 *
	 * The attach lock needs to be taken first.
	 */
	errcode = pt_section_lock_attach(section);
	if (errcode < 0)
		return errcode;

	errcode = pt_section_lock(section);
	if (errcode < 0)
		goto out_alock;

	bcache = pt_section_bcache(section);
	if (bcache) {
		errcode = 0;
		goto out_lock;
	}

	bcache = pt_bcache_alloc(csize);
	if (!bcache) {
		errcode = -pte_nomem;
		goto out_lock;
	}

	/* Install the block cache.  It will become visible and may be used
	 * immediately.
	 *
	 * If we fail later on, we leave the block cache and report the error to
	 * the allocating decoder thread.
	 */
	section->bcache = bcache;

	errcode = pt_section_memsize_locked(section, &memsize);
	if (errcode < 0)
		goto out_lock;

	errcode = pt_section_unlock(section);
	if (errcode < 0)
		goto out_alock;

	if (memsize) {
		iscache = section->iscache;
		if (iscache) {
			errcode = pt_iscache_notify_resize(iscache, section,
							   memsize);
			if (errcode < 0)
				goto out_alock;
		}
	}

	return pt_section_unlock_attach(section);

out_lock:
	(void) pt_section_unlock(section);

out_alock:
	(void) pt_section_unlock_attach(section);
	return errcode;
}

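/* Notify an attached image section cache, if any, that @section has been
 * mapped, while holding the attach lock.
 */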
int pt_section_on_map_lock(struct pt_section *section)
{
	struct pt_image_section_cache *iscache;
	int errcode, status;

	if (!section)
		return -pte_internal;

	errcode = pt_section_lock_attach(section);
	if (errcode < 0)
		return errcode;

	iscache = section->iscache;
	if (!iscache)
		return pt_section_unlock_attach(section);

	/* There is a potential deadlock when @section was unmapped again and
	 * @iscache tries to map it.  This would cause this function to be
	 * re-entered while we're still holding the attach lock.
	 *
	 * This scenario is very unlikely, though, since our caller does not yet
	 * know whether pt_section_map() succeeded.
	 */
	status = pt_iscache_notify_map(iscache, section);

	errcode = pt_section_unlock_attach(section);
	if (errcode < 0)
		return errcode;

	return status;
}

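/* Add another mapping to an already mapped @section.
 *
 * Returns zero on success, -pte_internal if @section is not mapped,
 * -pte_overflow if the 16-bit map count would wrap around.
 */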
int pt_section_map_share(struct pt_section *section)
{
	uint16_t mcount;
	int errcode;

	if (!section)
		return -pte_internal;

	errcode = pt_section_lock(section);
	if (errcode < 0)
		return errcode;

	mcount = section->mcount;
	if (!mcount) {
		(void) pt_section_unlock(section);
		return -pte_internal;
	}

	mcount += 1;
	if (!mcount) {
		(void) pt_section_unlock(section);
		return -pte_overflow;
	}

	section->mcount = mcount;

	return pt_section_unlock(section);
}

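/* Unmap @section, dropping one mapping.
 *
 * When the last mapping is dropped, the provider's unmap handler is called
 * and the block cache is freed.  Returns -pte_nomap if @section is not
 * mapped.
 */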
int pt_section_unmap(struct pt_section *section)
{
	uint16_t mcount;
	int errcode, status;

	if (!section)
		return -pte_internal;

	errcode = pt_section_lock(section);
	if (errcode < 0)
		return errcode;

	mcount = section->mcount;

	errcode = -pte_nomap;
	if (!mcount)
		goto out_unlock;

	section->mcount = mcount -= 1;
	if (mcount)
		return pt_section_unlock(section);

	errcode = -pte_internal;
	if (!section->unmap)
		goto out_unlock;

	status = section->unmap(section);

	pt_bcache_free(section->bcache);
	section->bcache = NULL;

	errcode = pt_section_unlock(section);
	if (errcode < 0)
		return errcode;

	return status;

out_unlock:
	(void) pt_section_unlock(section);
	return errcode;
}

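/* Read up to @size bytes from the mapped @section at @offset (relative to
 * the section's start) into @buffer.  Reads are truncated at the end of the
 * section.
 *
 * Returns the number of bytes read on success, -pte_nomap if @section is not
 * mapped or @offset lies outside of it.
 */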
int pt_section_read(const struct pt_section *section, uint8_t *buffer,
		    uint16_t size, uint64_t offset)
{
	uint64_t limit, space;

	if (!section)
		return -pte_internal;

	if (!section->read)
		return -pte_nomap;

	limit = section->size;
	if (limit <= offset)
		return -pte_nomap;

	/* Truncate if we try to read past the end of the section. */
	space = limit - offset;
	if (space < size)
		size = (uint16_t) space;

	return section->read(section, buffer, size, offset);
}