/*
 * Copyright (c) 2016-2019, Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  * Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *  * Neither the name of Intel Corporation nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "pt_block_cache.h"

#include <stdlib.h>
#include <string.h>


struct pt_block_cache *pt_bcache_alloc(uint64_t nentries)
{
	struct pt_block_cache *bcache;
	uint64_t size;

	if (!nentries || (UINT32_MAX < nentries))
		return NULL;

	size = sizeof(*bcache) + (nentries * sizeof(struct pt_bcache_entry));
	if (SIZE_MAX < size)
		return NULL;

	bcache = malloc((size_t) size);
	if (!bcache)
		return NULL;

	memset(bcache, 0, (size_t) size);
	bcache->nentries = (uint32_t) nentries;

	return bcache;
}

void pt_bcache_free(struct pt_block_cache *bcache)
{
	free(bcache);
}

int pt_bcache_add(struct pt_block_cache *bcache, uint64_t index,
		  struct pt_bcache_entry bce)
{
	if (!bcache)
		return -pte_internal;

	if (bcache->nentries <= index)
		return -pte_internal;

	/* We rely on guaranteed atomic operations as specified in section 8.1.1
	 * in Volume 3A of the Intel(R) Software Developer's Manual at
	 * http://www.intel.com/sdm.
	 */
	bcache->entry[(uint32_t) index] = bce;

	return 0;
}

int pt_bcache_lookup(struct pt_bcache_entry *bce,
		     const struct pt_block_cache *bcache, uint64_t index)
{
	if (!bce || !bcache)
		return -pte_internal;

	if (bcache->nentries <= index)
		return -pte_internal;

	/* We rely on guaranteed atomic operations as specified in section 8.1.1
	 * in Volume 3A of the Intel(R) Software Developer's Manual at
	 * http://www.intel.com/sdm.
	 */
	*bce = bcache->entry[(uint32_t) index];

	return 0;
}
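
/* The sketch below is not part of the library.  It is a minimal, hedged usage
 * example of the interface above: allocate a cache, publish one entry, read it
 * back, and free the cache.  The function name and the cache size are
 * hypothetical; the pte_* error codes come from intel-pt.h via
 * pt_block_cache.h, as pte_internal does above.  It is kept under #if 0 so the
 * translation unit itself is unchanged.
 */
#if 0
static int example_bcache_roundtrip(void)
{
	struct pt_block_cache *bcache;
	struct pt_bcache_entry bce;
	int errcode;

	/* Allocate a cache with an (arbitrary, example) 0x1000 entries. */
	bcache = pt_bcache_alloc(0x1000ull);
	if (!bcache)
		return -pte_nomem;

	/* Publish a (here: zero-initialized) entry at index 0x10.  The
	 * single-copy assignment in pt_bcache_add() relies on the atomicity
	 * note above, so readers need no additional locking.
	 */
	memset(&bce, 0, sizeof(bce));
	errcode = pt_bcache_add(bcache, 0x10ull, bce);
	if (errcode < 0)
		goto out;

	/* Read the entry back; out-of-range indices fail with -pte_internal. */
	errcode = pt_bcache_lookup(&bce, bcache, 0x10ull);

out:
	pt_bcache_free(bcache);
	return errcode;
}
#endif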