Lines Matching full:seg
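Every match below is from FreeBSD's sys/vm/vm_phys.c, which manages the physical-memory segment table (vm_phys_segs[]) and the per-domain buddy free queues. The short C sketches inserted after each group of matches are userspace models of the technique those lines exhibit, not kernel code; any identifier that does not appear in the matches is an assumption.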

174     "Phys Seg Info");
323 struct vm_phys_seg *seg; in sysctl_vm_phys_segs() local
332 seg = &vm_phys_segs[segind]; in sysctl_vm_phys_segs()
334 (uintmax_t)seg->start); in sysctl_vm_phys_segs()
336 (uintmax_t)seg->end); in sysctl_vm_phys_segs()
337 sbuf_printf(&sbuf, "domain: %d\n", seg->domain); in sysctl_vm_phys_segs()
338 sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues); in sysctl_vm_phys_segs()
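The sysctl_vm_phys_segs() matches iterate the segment array and render each entry with sbuf_printf(). A minimal userspace model of that reporting loop, with printf standing in for the kernel's sbuf API and made-up segment values:

    #include <stdint.h>
    #include <stdio.h>

    struct vm_phys_seg {           /* trimmed model of the kernel struct */
        uint64_t start, end;       /* physical address range [start, end) */
        int domain;                /* NUMA domain owning the segment */
        void *free_queues;         /* per-domain buddy free lists */
    };

    int main(void) {
        struct vm_phys_seg vm_phys_segs[] = {   /* hypothetical segments */
            { 0x0001000, 0x009f000, 0, NULL },
            { 0x0100000, 0x8000000, 0, NULL },
        };
        int vm_phys_nsegs = 2;

        for (int segind = 0; segind < vm_phys_nsegs; segind++) {
            struct vm_phys_seg *seg = &vm_phys_segs[segind];
            printf("\nSEGMENT %d:\n\n", segind);
            printf("start:     %#jx\n", (uintmax_t)seg->start);
            printf("end:       %#jx\n", (uintmax_t)seg->end);
            printf("domain:    %d\n", seg->domain);
            printf("free list: %p\n", seg->free_queues);
        }
        return 0;
    }
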
420 struct vm_phys_seg *seg; in _vm_phys_create_seg() local
426 seg = &vm_phys_segs[vm_phys_nsegs++]; in _vm_phys_create_seg()
427 while (seg > vm_phys_segs && (seg - 1)->start >= end) { in _vm_phys_create_seg()
428 *seg = *(seg - 1); in _vm_phys_create_seg()
429 seg--; in _vm_phys_create_seg()
431 seg->start = start; in _vm_phys_create_seg()
432 seg->end = end; in _vm_phys_create_seg()
433 seg->domain = domain; in _vm_phys_create_seg()
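_vm_phys_create_seg() keeps vm_phys_segs[] sorted by a single insertion-sort step: claim the slot past the current tail, shift every segment whose start lies at or above the new end one slot right, and drop the new range into the hole. A standalone model (array capacity and types are assumptions):

    #include <stdint.h>
    #include <stdio.h>

    struct seg { uint64_t start, end; int domain; };

    static struct seg segs[8];
    static int nsegs;

    /* Insert [start, end) keeping segs[] sorted by start, as in
     * _vm_phys_create_seg(): append, then bubble down into place. */
    static void create_seg(uint64_t start, uint64_t end, int domain) {
        struct seg *seg = &segs[nsegs++];
        while (seg > segs && (seg - 1)->start >= end) {
            *seg = *(seg - 1);   /* shift the larger segment right */
            seg--;
        }
        seg->start = start;
        seg->end = end;
        seg->domain = domain;
    }

    int main(void) {
        create_seg(0x4000, 0x8000, 0);
        create_seg(0x0000, 0x1000, 0);  /* lands in front after the shift */
        create_seg(0x2000, 0x3000, 0);
        for (int i = 0; i < nsegs; i++)
            printf("[%#jx, %#jx)\n", (uintmax_t)segs[i].start,
                (uintmax_t)segs[i].end);
        return 0;
    }
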
511 struct vm_phys_seg *end_seg, *prev_seg, *seg, *tmp_seg; in vm_phys_init() local
528 seg = &vm_phys_segs[segind]; in vm_phys_init()
530 if (seg->end <= VM_LOWMEM_BOUNDARY) in vm_phys_init()
544 seg->end <= VM_DMA32_BOUNDARY) in vm_phys_init()
550 npages += atop(seg->end - seg->start); in vm_phys_init()
574 seg = &vm_phys_segs[segind]; in vm_phys_init()
576 seg->first_page = &vm_page_array[npages]; in vm_phys_init()
577 npages += atop(seg->end - seg->start); in vm_phys_init()
579 seg->first_page = PHYS_TO_VM_PAGE(seg->start); in vm_phys_init()
582 if (seg->end <= VM_LOWMEM_BOUNDARY) { in vm_phys_init()
589 if (seg->end <= VM_DMA32_BOUNDARY) { in vm_phys_init()
600 seg->free_queues = &vm_phys_free_queues[seg->domain][flind]; in vm_phys_init()
608 seg = &vm_phys_segs[1]; in vm_phys_init()
610 while (seg < end_seg) { in vm_phys_init()
611 if (prev_seg->end == seg->start && in vm_phys_init()
612 prev_seg->free_queues == seg->free_queues) { in vm_phys_init()
613 prev_seg->end = seg->end; in vm_phys_init()
614 KASSERT(prev_seg->domain == seg->domain, in vm_phys_init()
618 for (tmp_seg = seg; tmp_seg < end_seg; tmp_seg++) in vm_phys_init()
621 prev_seg = seg; in vm_phys_init()
622 seg++; in vm_phys_init()
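The tail of vm_phys_init() coalesces physically adjacent segments that ended up with the same free_queues pointer: the earlier segment absorbs its neighbor and the rest of the array is compacted left. A userspace model of that merge pass, with free_queues reduced to an int tag:

    #include <stdint.h>
    #include <stdio.h>

    struct seg { uint64_t start, end; int queues; };

    int main(void) {
        struct seg segs[] = {
            { 0x0000, 0x1000, 0 },
            { 0x1000, 0x2000, 0 },   /* adjacent, same queues: merged */
            { 0x2000, 0x3000, 1 },   /* adjacent, different queues: kept */
        };
        int nsegs = 3;

        struct seg *end_seg = &segs[nsegs];
        struct seg *prev_seg = segs, *seg = &segs[1], *tmp_seg;
        while (seg < end_seg) {
            if (prev_seg->end == seg->start &&
                prev_seg->queues == seg->queues) {
                prev_seg->end = seg->end;        /* absorb the neighbor */
                for (tmp_seg = seg; tmp_seg < end_seg - 1; tmp_seg++)
                    *tmp_seg = *(tmp_seg + 1);   /* compact the array */
                end_seg--;
                nsegs--;
            } else {
                prev_seg = seg;
                seg++;
            }
        }
        for (int i = 0; i < nsegs; i++)
            printf("[%#jx, %#jx) q=%d\n", (uintmax_t)segs[i].start,
                (uintmax_t)segs[i].end, segs[i].queues);
        return 0;
    }
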
995 vm_phys_seg_paddr_to_vm_page(struct vm_phys_seg *seg, vm_paddr_t pa) in vm_phys_seg_paddr_to_vm_page() argument
997 KASSERT(pa >= seg->start && pa < seg->end, in vm_phys_seg_paddr_to_vm_page()
1000 return (&seg->first_page[atop(pa - seg->start)]); in vm_phys_seg_paddr_to_vm_page()
1009 struct vm_phys_seg *seg; in vm_phys_paddr_to_vm_page() local
1011 if ((seg = vm_phys_paddr_to_seg(pa)) != NULL) in vm_phys_paddr_to_vm_page()
1012 return (vm_phys_seg_paddr_to_vm_page(seg, pa)); in vm_phys_paddr_to_vm_page()
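vm_phys_seg_paddr_to_vm_page() is pure arithmetic: assert the address is inside the segment, subtract the segment base, convert bytes to a page index with atop(), and index first_page. A sketch with 4 KiB pages (struct page stands in for the kernel's vm_page):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define atop(x)    ((x) >> PAGE_SHIFT)   /* address to page index */

    struct page { uint64_t pa; };

    struct seg {
        uint64_t start, end;
        struct page *first_page;   /* page metadata for the page at 'start' */
    };

    static struct page *seg_paddr_to_page(struct seg *seg, uint64_t pa) {
        assert(pa >= seg->start && pa < seg->end);  /* KASSERT in the kernel */
        return &seg->first_page[atop(pa - seg->start)];
    }

    int main(void) {
        struct page pages[16];
        struct seg seg = { 0x10000, 0x20000, pages };
        for (int i = 0; i < 16; i++)
            pages[i].pa = seg.start + ((uint64_t)i << PAGE_SHIFT);
        struct page *m = seg_paddr_to_page(&seg, 0x13456);
        printf("pa %#x -> page at %#jx\n", 0x13456, (uintmax_t)m->pa);
        return 0;
    }
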
1019 struct vm_phys_fictitious_seg tmp, *seg; in vm_phys_fictitious_to_vm_page() local
1027 seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp); in vm_phys_fictitious_to_vm_page()
1029 if (seg == NULL) in vm_phys_fictitious_to_vm_page()
1032 m = &seg->first_page[atop(pa - seg->start)]; in vm_phys_fictitious_to_vm_page()
1056 struct vm_phys_fictitious_seg *seg; in vm_phys_fictitious_reg_range() local
1125 seg = malloc(sizeof(*seg), M_FICT_PAGES, M_WAITOK | M_ZERO); in vm_phys_fictitious_reg_range()
1126 seg->start = start; in vm_phys_fictitious_reg_range()
1127 seg->end = end; in vm_phys_fictitious_reg_range()
1128 seg->first_page = fp; in vm_phys_fictitious_reg_range()
1131 RB_INSERT(fict_tree, &vm_phys_fictitious_tree, seg); in vm_phys_fictitious_reg_range()
1140 struct vm_phys_fictitious_seg *seg, tmp; in vm_phys_fictitious_unreg_range() local
1186 seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp); in vm_phys_fictitious_unreg_range()
1187 if (seg->start != start || seg->end != end) { in vm_phys_fictitious_unreg_range()
1193 RB_REMOVE(fict_tree, &vm_phys_fictitious_tree, seg); in vm_phys_fictitious_unreg_range()
1195 free(seg->first_page, M_FICT_PAGES); in vm_phys_fictitious_unreg_range()
1196 free(seg, M_FICT_PAGES); in vm_phys_fictitious_unreg_range()
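The fictitious-range functions keep registered ranges in a red-black tree (RB_FIND/RB_INSERT/RB_REMOVE) and insist that unregistration name the exact [start, end) that was registered. This model swaps the tree for a linear scan but preserves the same contract:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct fict_seg { uint64_t start, end; };

    static struct fict_seg *ranges[8];
    static int nranges;

    static struct fict_seg *fict_find(uint64_t pa) {
        for (int i = 0; i < nranges; i++)
            if (pa >= ranges[i]->start && pa < ranges[i]->end)
                return ranges[i];
        return NULL;   /* RB_FIND returning NULL in the kernel */
    }

    static void fict_register(uint64_t start, uint64_t end) {
        /* the kernel allocates with M_WAITOK | M_ZERO */
        struct fict_seg *seg = calloc(1, sizeof(*seg));
        seg->start = start;
        seg->end = end;
        ranges[nranges++] = seg;
    }

    static void fict_unregister(uint64_t start, uint64_t end) {
        for (int i = 0; i < nranges; i++) {
            struct fict_seg *seg = ranges[i];
            if (seg->start != start || seg->end != end)
                continue;   /* the kernel treats a partial match as an error */
            ranges[i] = ranges[--nranges];
            free(seg);
            return;
        }
    }

    int main(void) {
        fict_register(0x1000, 0x3000);
        printf("0x2000 registered? %s\n", fict_find(0x2000) ? "yes" : "no");
        fict_unregister(0x1000, 0x3000);
        printf("0x2000 registered? %s\n", fict_find(0x2000) ? "yes" : "no");
        return 0;
    }
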
1208 struct vm_phys_seg *seg; in vm_phys_free_pages() local
1220 seg = &vm_phys_segs[m->segind]; in vm_phys_free_pages()
1221 vm_domain_free_assert_locked(VM_DOMAIN(seg->domain)); in vm_phys_free_pages()
1226 if (pa < seg->start || pa >= seg->end) in vm_phys_free_pages()
1228 m_buddy = vm_phys_seg_paddr_to_vm_page(seg, pa); in vm_phys_free_pages()
1231 fl = (*seg->free_queues)[m_buddy->pool]; in vm_phys_free_pages()
1237 m = vm_phys_seg_paddr_to_vm_page(seg, pa); in vm_phys_free_pages()
1240 fl = (*seg->free_queues)[m->pool]; in vm_phys_free_pages()
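vm_phys_free_pages() coalesces a freed block upward: the buddy of an order-o block is found by XOR-ing its address with the block size, and if the buddy is free at the same order the pair merges into an order o+1 block whose start has the enlarged size's low bits cleared. A sketch of just that address arithmetic (the XOR itself is not visible in the matched lines; it is inferred from the buddy scheme):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void) {
        uint64_t pa = 0x3000;   /* order-0 block (one 4 KiB page) */
        for (int order = 0; order < 3; order++) {
            uint64_t size = (uint64_t)1 << (PAGE_SHIFT + order);
            uint64_t buddy = pa ^ size;                /* merge candidate */
            uint64_t merged = pa & ~((size << 1) - 1); /* order+1 block base */
            printf("order %d: block %#jx, buddy %#jx, merged %#jx\n",
                order, (uintmax_t)pa, (uintmax_t)buddy, (uintmax_t)merged);
            pa = merged;        /* assume the buddy was free; keep climbing */
        }
        return 0;
    }

The segment bounds check in the matched lines (pa < seg->start || pa >= seg->end) stops this climb at a segment edge, where the buddy address may fall outside managed memory.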
1355 struct vm_phys_seg *seg; in vm_phys_enqueue_contig() local
1365 seg = &vm_phys_segs[m->segind]; in vm_phys_enqueue_contig()
1366 fl = (*seg->free_queues)[m->pool]; in vm_phys_enqueue_contig()
1379 KASSERT(seg == &vm_phys_segs[m->segind], in vm_phys_enqueue_contig()
1436 struct vm_phys_seg *end_seg, *seg; in vm_phys_find_range() local
1441 for (seg = &vm_phys_segs[segind]; seg < end_seg; seg++) { in vm_phys_find_range()
1442 if (seg->domain != domain) in vm_phys_find_range()
1444 if (seg->start >= high) in vm_phys_find_range()
1446 pa_start = MAX(low, seg->start); in vm_phys_find_range()
1447 pa_end = MIN(high, seg->end); in vm_phys_find_range()
1456 bounds[0] = vm_phys_seg_paddr_to_vm_page(seg, pa_start); in vm_phys_find_range()
1457 bounds[1] = &seg->first_page[atop(pa_end - seg->start)]; in vm_phys_find_range()
1458 return (seg - vm_phys_segs); in vm_phys_find_range()
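vm_phys_find_range() clips the caller's [low, high) window against each segment with a MAX/MIN pair before converting the result into vm_page bounds; vm_phys_find_freelist_contig() and vm_phys_alloc_contig() below apply the same clipping. The operation in isolation, with made-up segments:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX(a, b) ((a) > (b) ? (a) : (b))
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    struct seg { uint64_t start, end; };

    int main(void) {
        struct seg segs[] = { { 0x1000, 0x5000 }, { 0x8000, 0xc000 } };
        uint64_t low = 0x3000, high = 0xa000;   /* caller's window */

        for (int i = 0; i < 2; i++) {
            struct seg *seg = &segs[i];
            if (seg->start >= high || seg->end <= low)
                continue;                       /* no overlap with window */
            uint64_t pa_start = MAX(low, seg->start);
            uint64_t pa_end = MIN(high, seg->end);
            printf("seg %d overlap: [%#jx, %#jx)\n", i,
                (uintmax_t)pa_start, (uintmax_t)pa_end);
        }
        return 0;
    }
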
1474 struct vm_phys_seg *seg; in vm_phys_unfree_page() local
1479 seg = vm_phys_paddr_to_seg(pa); in vm_phys_unfree_page()
1480 vm_domain_free_assert_locked(VM_DOMAIN(seg->domain)); in vm_phys_unfree_page()
1486 vm_phys_lazy_init_domain(seg->domain, true); in vm_phys_unfree_page()
1499 if (pa >= seg->start) in vm_phys_unfree_page()
1500 m_set = vm_phys_seg_paddr_to_vm_page(seg, pa); in vm_phys_unfree_page()
1518 fl = (*seg->free_queues)[m_set->pool]; in vm_phys_unfree_page()
1525 m_tmp = vm_phys_seg_paddr_to_vm_page(seg, pa_half); in vm_phys_unfree_page()
1528 m_set = vm_phys_seg_paddr_to_vm_page(seg, pa_half); in vm_phys_unfree_page()
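vm_phys_unfree_page() isolates one page from a larger free block by repeated halving: pa_half marks the midpoint at each order, the half that does not contain the target is returned to a free list (the m_tmp branch), and the descent continues into the half that does (the m_set branch). A model of the descent, with printf standing in for vm_freelist_add():

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void) {
        uint64_t block = 0x0;     /* free block of order 3 (8 pages) */
        int order = 3;
        uint64_t pa = 0x5000;     /* page to carve out of the block */

        while (order > 0) {
            order--;
            uint64_t pa_half = block ^ ((uint64_t)1 << (PAGE_SHIFT + order));
            if (pa >= pa_half) {
                /* target in upper half: free the lower, descend upward */
                printf("free block %#jx, order %d\n", (uintmax_t)block, order);
                block = pa_half;
            } else {
                /* target in lower half: free the upper half */
                printf("free block %#jx, order %d\n", (uintmax_t)pa_half, order);
            }
        }
        printf("isolated page at %#jx\n", (uintmax_t)block);
        return 0;
    }

Starting from an order-3 block at 0, isolating page 0x5000 frees an order-2 block at 0, an order-1 block at 0x6000, and an order-0 page at 0x4000.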
1545 struct vm_phys_seg *seg; in vm_phys_find_freelist_contig() local
1567 seg = &vm_phys_segs[m->segind]; in vm_phys_find_freelist_contig()
1568 if (VM_PAGE_TO_PHYS(m) < MAX(low, seg->start)) in vm_phys_find_freelist_contig()
1571 VM_PAGE_TO_PHYS(m) - max_size >= MAX(low, seg->start) && in vm_phys_find_freelist_contig()
1583 VM_PAGE_TO_PHYS(m_ret) + size <= MIN(high, seg->end) && in vm_phys_find_freelist_contig()
1592 if (VM_PAGE_TO_PHYS(m_ret) + size > MIN(high, seg->end)) in vm_phys_find_freelist_contig()
1680 struct vm_phys_seg *seg; in vm_phys_alloc_contig() local
1693 seg = &vm_phys_segs[segind]; in vm_phys_alloc_contig()
1694 if (seg->start >= high || seg->domain != domain) in vm_phys_alloc_contig()
1696 if (low >= seg->end) in vm_phys_alloc_contig()
1698 if (low <= seg->start) in vm_phys_alloc_contig()
1699 pa_start = seg->start; in vm_phys_alloc_contig()
1702 if (high < seg->end) in vm_phys_alloc_contig()
1705 pa_end = seg->end; in vm_phys_alloc_contig()
1714 if (seg->free_queues == queues) in vm_phys_alloc_contig()
1716 queues = seg->free_queues; in vm_phys_alloc_contig()
1743 seg = &vm_phys_segs[m_run->segind]; in vm_phys_alloc_contig()
1744 KASSERT(seg->domain == domain, in vm_phys_alloc_contig()
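Besides the usual domain and [low, high) filtering, vm_phys_alloc_contig() skips a segment whose free_queues pointer matches the set it just searched, since adjacent segments in one domain and freelist share a single buddy queue. A model of that dedup:

    #include <stdio.h>

    /* stand-ins for the per-(domain, freelist) queue arrays */
    static int q_dom0_default, q_dom0_dma32, q_dom1_default;

    struct seg { int domain; int *queues; };

    int main(void) {
        struct seg segs[] = {
            { 0, &q_dom0_default },
            { 0, &q_dom0_default },  /* same queues as previous: skipped */
            { 1, &q_dom1_default },  /* wrong domain: skipped */
            { 0, &q_dom0_dma32 },
        };
        int domain = 0;
        int *queues = NULL;

        for (int i = 0; i < 4; i++) {
            struct seg *seg = &segs[i];
            if (seg->domain != domain)
                continue;
            if (seg->queues == queues)
                continue;            /* this queue set was just searched */
            queues = seg->queues;
            printf("search free queues of seg %d\n", i);
        }
        return 0;
    }
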
1877 struct vm_phys_seg *seg; in vm_phys_early_add_seg() local
1884 seg = &vm_phys_early_segs[vm_phys_early_nsegs++]; in vm_phys_early_add_seg()
1885 seg->start = start; in vm_phys_early_add_seg()
1886 seg->end = end; in vm_phys_early_add_seg()
1982 struct vm_phys_seg *seg; in vm_phys_early_startup() local
1991 seg = &vm_phys_early_segs[i]; in vm_phys_early_startup()
1992 vm_phys_add_seg(seg->start, seg->end); in vm_phys_early_startup()
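The two early-boot matches form a staging pattern: vm_phys_early_add_seg() records ranges in a static array before the allocator exists, and vm_phys_early_startup() replays them into vm_phys_add_seg() once it does. The pattern in miniature:

    #include <stdint.h>
    #include <stdio.h>

    struct range { uint64_t start, end; };

    static struct range early_segs[4];
    static int early_nsegs;

    /* callable before the real allocator is initialized */
    static void early_add_seg(uint64_t start, uint64_t end) {
        struct range *seg = &early_segs[early_nsegs++];
        seg->start = start;
        seg->end = end;
    }

    /* stand-in for vm_phys_add_seg() */
    static void add_seg(uint64_t start, uint64_t end) {
        printf("registered [%#jx, %#jx)\n", (uintmax_t)start, (uintmax_t)end);
    }

    int main(void) {
        early_add_seg(0x100000, 0x200000);        /* staged in early boot */
        early_add_seg(0x400000, 0x800000);
        for (int i = 0; i < early_nsegs; i++) {   /* replayed at startup */
            struct range *seg = &early_segs[i];
            add_seg(seg->start, seg->end);
        }
        return 0;
    }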