xref: /linux/tools/testing/selftests/bpf/prog_tests/mmap.c (revision d9c00c3b1639a3c8f46663cc042a3768d222021f)
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <sys/mman.h>
#include "test_mmap.skel.h"

struct map_data {
	__u64 val[512 * 4];
};

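/* mmap() works at page granularity, so round map sizes up to whole pages */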
static size_t roundup_page(size_t sz)
{
	long page_size = sysconf(_SC_PAGE_SIZE);
	return (sz + page_size - 1) / page_size * page_size;
}

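/* embed contents of the test_mmap.o BPF object file into the test binary */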
BPF_EMBED_OBJ(test_mmap, "test_mmap.o");

void test_mmap(void)
{
	const size_t bss_sz = roundup_page(sizeof(struct test_mmap__bss));
	const size_t map_sz = roundup_page(sizeof(struct map_data));
	const int zero = 0, one = 1, two = 2, far = 1500;
	const long page_size = sysconf(_SC_PAGE_SIZE);
	int err, duration = 0, i, data_map_fd;
	struct bpf_map *data_map, *bss_map;
	void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp1, *tmp2;
	struct test_mmap__bss *bss_data;
	struct map_data *map_data;
	struct test_mmap *skel;
	__u64 val = 0;

	skel = test_mmap__open_and_load(&test_mmap_embed);
	if (CHECK(!skel, "skel_open_and_load", "skeleton open/load failed\n"))
		return;

	bss_map = skel->maps.bss;
	data_map = skel->maps.data_map;
	data_map_fd = bpf_map__fd(data_map);

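	/* mmap() .bss R/W to access global variables directly from user space */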
	bss_mmaped = mmap(NULL, bss_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
			  bpf_map__fd(bss_map), 0);
	if (CHECK(bss_mmaped == MAP_FAILED, "bss_mmap",
		  ".bss mmap failed: %d\n", errno)) {
		bss_mmaped = NULL;
		goto cleanup;
	}
	/* map as R/W first */
	map_mmaped = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
			  data_map_fd, 0);
	if (CHECK(map_mmaped == MAP_FAILED, "data_mmap",
		  "data_map mmap failed: %d\n", errno)) {
		map_mmaped = NULL;
		goto cleanup;
	}

	bss_data = bss_mmaped;
	map_data = map_mmaped;

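	/* everything should still be zero-initialized before the BPF program runs */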
	CHECK_FAIL(bss_data->in_val);
	CHECK_FAIL(bss_data->out_val);
	CHECK_FAIL(skel->bss->in_val);
	CHECK_FAIL(skel->bss->out_val);
	CHECK_FAIL(map_data->val[0]);
	CHECK_FAIL(map_data->val[1]);
	CHECK_FAIL(map_data->val[2]);
	CHECK_FAIL(map_data->val[far]);

	err = test_mmap__attach(skel);
	if (CHECK(err, "attach_raw_tp", "err %d\n", err))
		goto cleanup;

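	/* update inputs via mmap-ed memory and the map update syscall;
	 * usleep() triggers the attached raw_tp program
	 */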
	bss_data->in_val = 123;
	val = 111;
	CHECK_FAIL(bpf_map_update_elem(data_map_fd, &zero, &val, 0));

	usleep(1);

	CHECK_FAIL(bss_data->in_val != 123);
	CHECK_FAIL(bss_data->out_val != 123);
	CHECK_FAIL(skel->bss->in_val != 123);
	CHECK_FAIL(skel->bss->out_val != 123);
	CHECK_FAIL(map_data->val[0] != 111);
	CHECK_FAIL(map_data->val[1] != 222);
	CHECK_FAIL(map_data->val[2] != 123);
	CHECK_FAIL(map_data->val[far] != 3 * 123);

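	/* the same values must be visible through the regular lookup syscall */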
	CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &zero, &val));
	CHECK_FAIL(val != 111);
	CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &one, &val));
	CHECK_FAIL(val != 222);
	CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &two, &val));
	CHECK_FAIL(val != 123);
	CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &far, &val));
	CHECK_FAIL(val != 3 * 123);

	/* data_map freeze should fail due to R/W mmap() */
	err = bpf_map_freeze(data_map_fd);
	if (CHECK(!err || errno != EBUSY, "no_freeze",
		  "data_map freeze succeeded: err=%d, errno=%d\n", err, errno))
		goto cleanup;

	/* unmap R/W mapping */
	err = munmap(map_mmaped, map_sz);
	map_mmaped = NULL;
	if (CHECK(err, "data_map_munmap", "data_map munmap failed: %d\n", errno))
		goto cleanup;

	/* re-map as R/O now */
	map_mmaped = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, data_map_fd, 0);
	if (CHECK(map_mmaped == MAP_FAILED, "data_mmap",
		  "data_map R/O mmap failed: %d\n", errno)) {
		map_mmaped = NULL;
		goto cleanup;
	}
	map_data = map_mmaped;

	/* map/unmap in a loop to test ref counting */
	for (i = 0; i < 10; i++) {
		int flags = i % 2 ? PROT_READ : PROT_WRITE;
		void *p;

		p = mmap(NULL, map_sz, flags, MAP_SHARED, data_map_fd, 0);
		if (CHECK_FAIL(p == MAP_FAILED))
			goto cleanup;
		err = munmap(p, map_sz);
		if (CHECK_FAIL(err))
			goto cleanup;
	}

	/* data_map freeze should now succeed due to no R/W mapping */
	err = bpf_map_freeze(data_map_fd);
	if (CHECK(err, "freeze", "data_map freeze failed: err=%d, errno=%d\n",
		  err, errno))
		goto cleanup;

	/* mapping as R/W now should fail */
	tmp1 = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
		    data_map_fd, 0);
	if (CHECK(tmp1 != MAP_FAILED, "data_mmap", "mmap succeeded\n")) {
		munmap(tmp1, map_sz);
		goto cleanup;
	}

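	/* frozen map is still writable by the BPF program; R/O mapping must see updates */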
	bss_data->in_val = 321;
	usleep(1);
	CHECK_FAIL(bss_data->in_val != 321);
	CHECK_FAIL(bss_data->out_val != 321);
	CHECK_FAIL(skel->bss->in_val != 321);
	CHECK_FAIL(skel->bss->out_val != 321);
	CHECK_FAIL(map_data->val[0] != 111);
	CHECK_FAIL(map_data->val[1] != 222);
	CHECK_FAIL(map_data->val[2] != 321);
	CHECK_FAIL(map_data->val[far] != 3 * 321);

	/* check some more advanced mmap() manipulations */

	/* map all but last page: pages 1-3 mapped */
	tmp1 = mmap(NULL, 3 * page_size, PROT_READ, MAP_SHARED,
		    data_map_fd, 0);
	if (CHECK(tmp1 == MAP_FAILED, "adv_mmap1", "errno %d\n", errno))
		goto cleanup;

	/* unmap second page: pages 1, 3 mapped */
	err = munmap(tmp1 + page_size, page_size);
	if (CHECK(err, "adv_mmap2", "errno %d\n", errno)) {
		munmap(tmp1, map_sz);
		goto cleanup;
	}

	/* map page 2 back */
	tmp2 = mmap(tmp1 + page_size, page_size, PROT_READ,
		    MAP_SHARED | MAP_FIXED, data_map_fd, 0);
	if (CHECK(tmp2 == MAP_FAILED, "adv_mmap3", "errno %d\n", errno)) {
		munmap(tmp1, page_size);
		munmap(tmp1 + 2 * page_size, page_size);
		goto cleanup;
	}
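	/* MAP_FIXED must have placed page 2 exactly at the requested address */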
	CHECK(tmp1 + page_size != tmp2, "adv_mmap4",
	      "tmp1: %p, tmp2: %p\n", tmp1, tmp2);

	/* re-map all 4 pages */
	tmp2 = mmap(tmp1, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
		    data_map_fd, 0);
	if (CHECK(tmp2 == MAP_FAILED, "adv_mmap5", "errno %d\n", errno)) {
		munmap(tmp1, 3 * page_size); /* unmap pages 1-3 */
		goto cleanup;
	}
	CHECK(tmp1 != tmp2, "adv_mmap6", "tmp1: %p, tmp2: %p\n", tmp1, tmp2);

	map_data = tmp2;
	CHECK_FAIL(bss_data->in_val != 321);
	CHECK_FAIL(bss_data->out_val != 321);
	CHECK_FAIL(skel->bss->in_val != 321);
	CHECK_FAIL(skel->bss->out_val != 321);
	CHECK_FAIL(map_data->val[0] != 111);
	CHECK_FAIL(map_data->val[1] != 222);
	CHECK_FAIL(map_data->val[2] != 321);
	CHECK_FAIL(map_data->val[far] != 3 * 321);

	munmap(tmp2, 4 * page_size);
cleanup:
	if (bss_mmaped)
		CHECK_FAIL(munmap(bss_mmaped, bss_sz));
	if (map_mmaped)
		CHECK_FAIL(munmap(map_mmaped, map_sz));
	test_mmap__destroy(skel);
}
209