/* $NetBSD: t_uvm_physseg_load.c,v 1.2 2016/12/22 08:15:20 cherry Exp $ */

/*-
 * Copyright (c) 2015, 2016 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Santhosh N. Raju <santhosh.raju@gmail.com> and
 * by Cherry G. Mathew
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: t_uvm_physseg_load.c,v 1.2 2016/12/22 08:15:20 cherry Exp $");

/*
 * If this line is commented out, tests related to uvm_physseg_get_pmseg()
 * won't run.
 *
 * Have a look at machine/uvm_physseg.h for more details.
 */
#define __HAVE_PMAP_PHYSSEG

/*
 * This is a dummy struct used for testing purposes.
 *
 * In reality this struct would live in the MD part of the code, in
 * machine/vmparam.h.
 */

#ifdef __HAVE_PMAP_PHYSSEG
struct pmap_physseg {
	int dummy_variable;	/* Dummy variable used for testing */
};
#endif

/* Testing API - assumes userland */
/* Provide Kernel API equivalents */
#include <assert.h>
#include <stdbool.h>
#include <string.h> /* memset(3) et al. */
#include <stdio.h> /* printf(3) */
#include <stdlib.h> /* malloc(3) */
#include <stdarg.h>
#include <stddef.h>
#include <time.h>

#define PRIxPADDR "lx"
#define PRIxPSIZE "lx"
#define PRIuPSIZE "lu"
#define PRIxVADDR "lx"
#define PRIxVSIZE "lx"
#define PRIuVSIZE "lu"

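/*
 * Feature switches for the copy of uvm_physseg.c #included further below:
 * UVM_HOTPLUG selects the rbtree-backed hotplug implementation, while
 * PMAP_STEAL_MEMORY and DEBUG compile in additional code paths in that file.
 */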
#define UVM_HOTPLUG /* Enable hotplug with rbtree. */
#define PMAP_STEAL_MEMORY
#define DEBUG /* Enable debug functionality. */

typedef unsigned long vaddr_t;
typedef unsigned long paddr_t;
typedef unsigned long psize_t;
typedef unsigned long vsize_t;

#include <uvm/uvm_physseg.h>
#include <uvm/uvm_page.h>

#ifndef DIAGNOSTIC
#define KASSERTMSG(e, msg, ...) /* NOTHING */
#define KASSERT(e) /* NOTHING */
#else
#define KASSERT(a) assert(a)
#define KASSERTMSG(exp, ...) do { printf(__VA_ARGS__); assert((exp)); } while (/*CONSTCOND*/0)
#endif

#define VM_PHYSSEG_STRAT VM_PSTRAT_BSEARCH

#define VM_NFREELIST 4
#define VM_FREELIST_DEFAULT 0
#define VM_FREELIST_FIRST16 3
#define VM_FREELIST_FIRST1G 2
#define VM_FREELIST_FIRST4G 1

/*
 * Used in tests when the array implementation is tested.
 */
#if !defined(VM_PHYSSEG_MAX)
#define VM_PHYSSEG_MAX 32
#endif

#define PAGE_SIZE 4096
#define PAGE_SHIFT 12
#define atop(x) (((paddr_t)(x)) >> PAGE_SHIFT)

#define mutex_enter(l)
#define mutex_exit(l)

#define _SYS_KMEM_H_ /* Disallow the real kmem API (see below) */
/* free(p) XXX: pgs management needs more thought */
#define kmem_alloc(size, flags) malloc(size)
#define kmem_zalloc(size, flags) malloc(size)
#define kmem_free(p, size) free(p)

psize_t physmem;

struct uvmexp uvmexp; /* decl */

/*
 * uvm structure borrowed from uvm.h
 *
 * This is a dummy structure used only within the ATF tests; it carries
 * just the fields needed from the original uvm struct.
 * See uvm/uvm.h for the full struct.
 */

struct uvm {
	/* vm_page related parameters */

	bool page_init_done;	/* TRUE if uvm_page_init() finished */
} uvm;

static void
panic(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	printf("\n");
	va_end(ap);
	KASSERT(false);

	/*NOTREACHED*/
}

static void
uvm_pagefree(struct vm_page *pg)
{
	return;
}

#if defined(UVM_HOTPLUG)
static void
uvmpdpol_reinit(void)
{
	return;
}
#endif /* UVM_HOTPLUG */

/* end - Provide Kernel API equivalents */

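/*
 * The implementation under test is compiled directly into this program;
 * the stubs and macros above stand in for the kernel APIs it expects.
 */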
#include "uvm/uvm_physseg.c"

#include <atf-c.h>

#define ONE_MEGABYTE (1024 * 1024)

/* Sample Page Frame Numbers */
#define VALID_START_PFN_1 atop(0)
#define VALID_END_PFN_1 atop(ONE_MEGABYTE)
#define VALID_AVAIL_START_PFN_1 atop(0)
#define VALID_AVAIL_END_PFN_1 atop(ONE_MEGABYTE)

#define VALID_START_PFN_2 atop(ONE_MEGABYTE + 1)
#define VALID_END_PFN_2 atop(ONE_MEGABYTE * 2)
#define VALID_AVAIL_START_PFN_2 atop(ONE_MEGABYTE + 1)
#define VALID_AVAIL_END_PFN_2 atop(ONE_MEGABYTE * 2)

#define VALID_START_PFN_3 atop((ONE_MEGABYTE * 2) + 1)
#define VALID_END_PFN_3 atop(ONE_MEGABYTE * 3)
#define VALID_AVAIL_START_PFN_3 atop((ONE_MEGABYTE * 2) + 1)
#define VALID_AVAIL_END_PFN_3 atop(ONE_MEGABYTE * 3)

#define VALID_START_PFN_4 atop(ONE_MEGABYTE + 1)
#define VALID_END_PFN_4 atop(ONE_MEGABYTE * 128)
#define VALID_AVAIL_START_PFN_4 atop(ONE_MEGABYTE + 1)
#define VALID_AVAIL_END_PFN_4 atop(ONE_MEGABYTE * 128)

#define VALID_START_PFN_5 atop(ONE_MEGABYTE + 1)
#define VALID_END_PFN_5 atop(ONE_MEGABYTE * 256)
#define VALID_AVAIL_START_PFN_5 atop(ONE_MEGABYTE + 1)
#define VALID_AVAIL_END_PFN_5 atop(ONE_MEGABYTE * 256)

/*
 * The total number of pages (of 4 KB each) is 256 for 1 MB of memory.
 */
#define PAGE_COUNT_1M 256

/*
 * The number of page frames to allot per segment.
 */
#define PF_STEP 8
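/*
 * With PF_STEP frames per segment, the 1 MB test range of PAGE_COUNT_1M
 * (256) frames is loaded as 256 / 8 == 32 segments, which is exactly
 * VM_PHYSSEG_MAX for the fixed memory size tests below.
 */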

/*
 * A debug function that prints the contents of a segment (upm).
 */
static inline void
uvm_physseg_dump_seg(uvm_physseg_t upm)
{
#if defined(DEBUG)
	printf("%s: seg->start == %ld\n", __func__,
	    uvm_physseg_get_start(upm));
	printf("%s: seg->end == %ld\n", __func__,
	    uvm_physseg_get_end(upm));
	printf("%s: seg->avail_start == %ld\n", __func__,
	    uvm_physseg_get_avail_start(upm));
	printf("%s: seg->avail_end == %ld\n", __func__,
	    uvm_physseg_get_avail_end(upm));

	printf("====\n\n");
#else
	return;
#endif /* DEBUG */
}

/*
 * Private accessor that returns the number of physical memory segments
 * (vm_physmem / uvm_physseg_graph entries).
 */
static int
uvm_physseg_get_entries(void)
{
#if defined(UVM_HOTPLUG)
	return uvm_physseg_graph.nentries;
#else
	return vm_nphysmem;
#endif /* UVM_HOTPLUG */
}

/*
 * Note: This function replicates verbatim what happens in
 * uvm_page.c:uvm_page_init().
 *
 * Please track any changes that happen there.
 */
static void
uvm_page_init_fake(struct vm_page *pagearray, psize_t pagecount)
{
	uvm_physseg_t bank;
	size_t n;

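	/*
	 * Hand the whole backing vm_page slab over via the first segment,
	 * then walk every segment, carving out and initializing its share
	 * of page structures, just as uvm_page_init() does at boot.
	 */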
	for (bank = uvm_physseg_get_first(),
	    uvm_physseg_seg_chomp_slab(bank, pagearray, pagecount);
	    uvm_physseg_valid_p(bank);
	    bank = uvm_physseg_get_next(bank)) {

		n = uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
		uvm_physseg_seg_alloc_from_slab(bank, n);
		uvm_physseg_init_seg(bank, pagearray);

		/* set up page array pointers */
		pagearray += n;
		pagecount -= n;
	}

	uvm.page_init_done = true;
}

/*
 * PHYS_TO_VM_PAGE: find vm_page for a PA. used by MI code to get vm_pages
 * back from an I/O mapping (ugh!). used in some MD code as well.
 */
static struct vm_page *
uvm_phys_to_vm_page(paddr_t pa)
{
	paddr_t pf = atop(pa);
	paddr_t off;
	uvm_physseg_t psi;

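	/* Segment lookup by frame number; the hot path the load tests exercise. */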
	psi = uvm_physseg_find(pf, &off);
	if (psi != UVM_PHYSSEG_TYPE_INVALID)
		return uvm_physseg_get_pg(psi, off);
	return(NULL);
}

//static paddr_t
//uvm_vm_page_to_phys(const struct vm_page *pg)
//{
//
//	return pg->phys_addr;
//}

/*
 * XXX: To do, write control test cases for uvm_vm_page_to_phys().
 */

/* #define VM_PAGE_TO_PHYS(entry) uvm_vm_page_to_phys(entry) */

#define PHYS_TO_VM_PAGE(pa) uvm_phys_to_vm_page(pa)

/*
 * Test Fixture SetUp().
 */
static void
setup(void)
{
	/* Prerequisites for running certain calls in uvm_physseg */
	uvmexp.pagesize = PAGE_SIZE;
	uvmexp.npages = 0;
	uvm.page_init_done = false;
	uvm_physseg_init();
}

ATF_TC(uvm_physseg_100);
ATF_TC_HEAD(uvm_physseg_100, tc)
{
	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
	    100 calls, VM_PHYSSEG_MAX is 32.");
}
ATF_TC_BODY(uvm_physseg_100, tc)
{
	paddr_t pa;

	setup();

	for (paddr_t i = VALID_START_PFN_1;
	    i < VALID_END_PFN_1; i += PF_STEP) {
		uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
		    VM_FREELIST_DEFAULT);
	}

	ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());

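	/* Translate 100 random physical addresses within the loaded 1 MB range. */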
	srandom((unsigned)time(NULL));
	for (int i = 0; i < 100; i++) {
		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
		PHYS_TO_VM_PAGE(pa);
	}

	ATF_CHECK_EQ(true, true);
}

ATF_TC(uvm_physseg_1K);
ATF_TC_HEAD(uvm_physseg_1K, tc)
{
	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
	    1000 calls, VM_PHYSSEG_MAX is 32.");
}
ATF_TC_BODY(uvm_physseg_1K, tc)
{
	paddr_t pa;

	setup();

	for (paddr_t i = VALID_START_PFN_1;
	    i < VALID_END_PFN_1; i += PF_STEP) {
		uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
		    VM_FREELIST_DEFAULT);
	}

	ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());

	srandom((unsigned)time(NULL));
	for (int i = 0; i < 1000; i++) {
		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
		PHYS_TO_VM_PAGE(pa);
	}

	ATF_CHECK_EQ(true, true);
}

ATF_TC(uvm_physseg_10K);
ATF_TC_HEAD(uvm_physseg_10K, tc)
{
	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
	    10,000 calls, VM_PHYSSEG_MAX is 32.");
}
ATF_TC_BODY(uvm_physseg_10K, tc)
{
	paddr_t pa;

	setup();

	for (paddr_t i = VALID_START_PFN_1;
	    i < VALID_END_PFN_1; i += PF_STEP) {
		uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
		    VM_FREELIST_DEFAULT);
	}

	ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());

	srandom((unsigned)time(NULL));
	for (int i = 0; i < 10000; i++) {
		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
		PHYS_TO_VM_PAGE(pa);
	}

	ATF_CHECK_EQ(true, true);
}

ATF_TC(uvm_physseg_100K);
ATF_TC_HEAD(uvm_physseg_100K, tc)
{
	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
	    100,000 calls, VM_PHYSSEG_MAX is 32.");
}
ATF_TC_BODY(uvm_physseg_100K, tc)
{
	paddr_t pa;

	setup();

	for (paddr_t i = VALID_START_PFN_1;
	    i < VALID_END_PFN_1; i += PF_STEP) {
		uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
		    VM_FREELIST_DEFAULT);
	}

	ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());

	srandom((unsigned)time(NULL));
	for (int i = 0; i < 100000; i++) {
		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
		PHYS_TO_VM_PAGE(pa);
	}

	ATF_CHECK_EQ(true, true);
}

ATF_TC(uvm_physseg_1M);
ATF_TC_HEAD(uvm_physseg_1M, tc)
{
	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
	    1,000,000 calls, VM_PHYSSEG_MAX is 32.");
}
ATF_TC_BODY(uvm_physseg_1M, tc)
{
	paddr_t pa;

	setup();

	for (paddr_t i = VALID_START_PFN_1;
	    i < VALID_END_PFN_1; i += PF_STEP) {
		uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
		    VM_FREELIST_DEFAULT);
	}

	ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());

	srandom((unsigned)time(NULL));
	for (int i = 0; i < 1000000; i++) {
		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
		PHYS_TO_VM_PAGE(pa);
	}

	ATF_CHECK_EQ(true, true);
}

ATF_TC(uvm_physseg_10M);
ATF_TC_HEAD(uvm_physseg_10M, tc)
{
	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
	    10,000,000 calls, VM_PHYSSEG_MAX is 32.");
}
ATF_TC_BODY(uvm_physseg_10M, tc)
{
	paddr_t pa;

	setup();

	for (paddr_t i = VALID_START_PFN_1;
	    i < VALID_END_PFN_1; i += PF_STEP) {
		uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
		    VM_FREELIST_DEFAULT);
	}

	ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());

	srandom((unsigned)time(NULL));
	for (int i = 0; i < 10000000; i++) {
		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
		PHYS_TO_VM_PAGE(pa);
	}

	ATF_CHECK_EQ(true, true);
}

ATF_TC(uvm_physseg_100M);
ATF_TC_HEAD(uvm_physseg_100M, tc)
{
	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
	    100,000,000 calls, VM_PHYSSEG_MAX is 32.");
}
ATF_TC_BODY(uvm_physseg_100M, tc)
{
	paddr_t pa;

	setup();

	for (paddr_t i = VALID_START_PFN_1;
	    i < VALID_END_PFN_1; i += PF_STEP) {
		uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
		    VM_FREELIST_DEFAULT);
	}

	ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());

	srandom((unsigned)time(NULL));
	for (int i = 0; i < 100000000; i++) {
		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
		PHYS_TO_VM_PAGE(pa);
	}

	ATF_CHECK_EQ(true, true);
}

ATF_TC(uvm_physseg_1MB);
ATF_TC_HEAD(uvm_physseg_1MB, tc)
{
	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
	    10,000,000 calls, VM_PHYSSEG_MAX is 32 on 1 MB Segment.");
}
ATF_TC_BODY(uvm_physseg_1MB, t)
{
	paddr_t pa = 0;

	paddr_t pf = 0;

	psize_t pf_chunk_size = 0;

	psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);

	psize_t npages2 = (VALID_END_PFN_2 - VALID_START_PFN_2);

	struct vm_page *slab = malloc(sizeof(struct vm_page) *
	    (npages1 + npages2));

	setup();

	/* We start with zero segments */
	ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_1, npages1, NULL));
	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());

	/* Post boot: Fake all segments and pages accounted for. */
	uvm_page_init_fake(slab, npages1 + npages2);

	ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_2, npages2, NULL));
	ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());

	srandom((unsigned)time(NULL));
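	/*
	 * Punch holes of random size (1 to PF_STEP - 1 frames) into the
	 * second segment so the lookups below run against a fragmented
	 * layout.
	 */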
	for (pf = VALID_START_PFN_2; pf < VALID_END_PFN_2; pf += PF_STEP) {
		pf_chunk_size = (psize_t) random() % (psize_t) (PF_STEP - 1) + 1;
		uvm_physseg_unplug(pf, pf_chunk_size);
	}

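	/*
	 * Translate 10,000,000 random physical addresses, shifted up into
	 * the second (fragmented) segment's address range.
	 */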
	for (int i = 0; i < 10000000; i++) {
		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_2);
		if (pa < ctob(VALID_START_PFN_2))
			pa += ctob(VALID_START_PFN_2);
		PHYS_TO_VM_PAGE(pa);
	}

	ATF_CHECK_EQ(true, true);
}

ATF_TC(uvm_physseg_64MB);
ATF_TC_HEAD(uvm_physseg_64MB, tc)
{
	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
	    10,000,000 calls, VM_PHYSSEG_MAX is 32 on 64 MB Segment.");
}
ATF_TC_BODY(uvm_physseg_64MB, t)
{
	paddr_t pa = 0;

	paddr_t pf = 0;

	psize_t pf_chunk_size = 0;

	psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);

	psize_t npages2 = (VALID_END_PFN_3 - VALID_START_PFN_3);

	struct vm_page *slab = malloc(sizeof(struct vm_page) *
	    (npages1 + npages2));

	setup();

	/* We start with zero segments */
	ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_1, npages1, NULL));
	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());

	/* Post boot: Fake all segments and pages accounted for. */
	uvm_page_init_fake(slab, npages1 + npages2);

	ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_3, npages2, NULL));
	ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());

	srandom((unsigned)time(NULL));
	for (pf = VALID_START_PFN_3; pf < VALID_END_PFN_3; pf += PF_STEP) {
		pf_chunk_size = (psize_t) random() % (psize_t) (PF_STEP - 1) + 1;
		uvm_physseg_unplug(pf, pf_chunk_size);
	}

	for (int i = 0; i < 10000000; i++) {
		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_3);
		if (pa < ctob(VALID_START_PFN_3))
			pa += ctob(VALID_START_PFN_3);
		PHYS_TO_VM_PAGE(pa);
	}

	ATF_CHECK_EQ(true, true);
}

ATF_TC(uvm_physseg_128MB);
ATF_TC_HEAD(uvm_physseg_128MB, tc)
{
	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
	    10,000,000 calls, VM_PHYSSEG_MAX is 32 on 128 MB Segment.");
}
ATF_TC_BODY(uvm_physseg_128MB, t)
{
	paddr_t pa = 0;

	paddr_t pf = 0;

	psize_t pf_chunk_size = 0;

	psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);

	psize_t npages2 = (VALID_END_PFN_4 - VALID_START_PFN_4);

	struct vm_page *slab = malloc(sizeof(struct vm_page)
	    * (npages1 + npages2));

	setup();

	/* We start with zero segments */
	ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_1, npages1, NULL));
	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());

	/* Post boot: Fake all segments and pages accounted for. */
	uvm_page_init_fake(slab, npages1 + npages2);

	ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_2, npages2, NULL));
	ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());

	srandom((unsigned)time(NULL));
	for (pf = VALID_START_PFN_4; pf < VALID_END_PFN_4; pf += PF_STEP) {
		pf_chunk_size = (psize_t) random() % (psize_t) (PF_STEP - 1) + 1;
		uvm_physseg_unplug(pf, pf_chunk_size);
	}

	for (int i = 0; i < 10000000; i++) {
		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_4);
		if (pa < ctob(VALID_START_PFN_4))
			pa += ctob(VALID_START_PFN_4);
		PHYS_TO_VM_PAGE(pa);
	}

	ATF_CHECK_EQ(true, true);
}

ATF_TC(uvm_physseg_256MB);
ATF_TC_HEAD(uvm_physseg_256MB, tc)
{
	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
	    10,000,000 calls, VM_PHYSSEG_MAX is 32 on 256 MB Segment.");
}
ATF_TC_BODY(uvm_physseg_256MB, t)
{
	paddr_t pa = 0;

	paddr_t pf = 0;

	psize_t pf_chunk_size = 0;

	psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);

	psize_t npages2 = (VALID_END_PFN_5 - VALID_START_PFN_5);

	struct vm_page *slab = malloc(sizeof(struct vm_page) * (npages1 + npages2));

	setup();

	/* We start with zero segments */
	ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_1, npages1, NULL));
	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());

	/* Post boot: Fake all segments and pages accounted for. */
	uvm_page_init_fake(slab, npages1 + npages2);

	ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_2, npages2, NULL));
	ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());

	srandom((unsigned)time(NULL));
	for (pf = VALID_START_PFN_5; pf < VALID_END_PFN_5; pf += PF_STEP) {
		pf_chunk_size = (psize_t) random() % (psize_t) (PF_STEP - 1) + 1;
		uvm_physseg_unplug(pf, pf_chunk_size);
	}

	for (int i = 0; i < 10000000; i++) {
		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_5);
		if (pa < ctob(VALID_START_PFN_5))
			pa += ctob(VALID_START_PFN_5);
		PHYS_TO_VM_PAGE(pa);
	}

	ATF_CHECK_EQ(true, true);
}

ATF_TP_ADD_TCS(tp)
{
	/* Fixed memory size tests. */
	ATF_TP_ADD_TC(tp, uvm_physseg_100);
	ATF_TP_ADD_TC(tp, uvm_physseg_1K);
	ATF_TP_ADD_TC(tp, uvm_physseg_10K);
	ATF_TP_ADD_TC(tp, uvm_physseg_100K);
	ATF_TP_ADD_TC(tp, uvm_physseg_1M);
	ATF_TP_ADD_TC(tp, uvm_physseg_10M);
	ATF_TP_ADD_TC(tp, uvm_physseg_100M);

#if defined(UVM_HOTPLUG)
	/* Variable memory size tests. */
	ATF_TP_ADD_TC(tp, uvm_physseg_1MB);
	ATF_TP_ADD_TC(tp, uvm_physseg_64MB);
	ATF_TP_ADD_TC(tp, uvm_physseg_128MB);
	ATF_TP_ADD_TC(tp, uvm_physseg_256MB);
#endif /* UVM_HOTPLUG */

	return atf_no_error();
}