Lines matching defs:addr (definitions and uses of addr in seg_kmem.c, shown with their source line numbers)
359 boot_mapin(caddr_t addr, size_t size)
368 for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
369 pfnum = va_to_pfn(addr);
397 (void) page_hashin(pp, &kvp, (u_offset_t)(uintptr_t)addr, NULL);
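Pieced together, boot_mapin() walks an already boot-mapped range one page at a time, resolves each virtual address to a page frame, and hashes the corresponding page_t into the kernel vnode kvp. A minimal sketch assembled from the hits above; the pfn-to-page_t step is not shown in the hits, and page_numtopp_nolock() is assumed:

    for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
            pfnum = va_to_pfn(addr);        /* VA -> page frame number */
            if (pfnum == PFN_INVALID)
                    continue;
            /* assumption: how the page_t is recovered from the pfn */
            if ((pp = page_numtopp_nolock(pfnum)) == NULL)
                    panic("boot_mapin: no page for pfn %lx", pfnum);
            /* name the page under the kernel vnode at this VA */
            (void) page_hashin(pp, &kvp, (u_offset_t)(uintptr_t)addr, NULL);
    }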
414 caddr_t addr = inaddr;
422 if (bop_alloc_chunk(addr, size, align) != (caddr_t)addr)
425 if (BOP_ALLOC(bootops, addr, size, align) != addr)
428 boot_mapin((caddr_t)addr, size);
429 return (addr);
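boot_alloc() asks the boot firmware for memory at a fixed address and then runs boot_mapin() over the result. The two allocator calls at source lines 422 and 425 are presumably alternate platform paths; a sketch assuming a SPARC/non-SPARC conditional and assumed panic messages:

    caddr_t addr = inaddr;

    #ifdef __sparc  /* assumption: the two calls are a platform split */
        if (bop_alloc_chunk(addr, size, align) != (caddr_t)addr)
                panic("boot_alloc: bop_alloc_chunk failed");
    #else
        if (BOP_ALLOC(bootops, addr, size, align) != addr)
                panic("boot_alloc: BOP_ALLOC failed");
    #endif
        boot_mapin((caddr_t)addr, size);
        return (addr);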
442 segkmem_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t size,
453 addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
460 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
461 return (SEGOP_FAULT(hat, segkp, addr, size, type, rw));
471 pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
478 if (!hat_probe(kas.a_hat, addr)) {
479 addr -= PAGESIZE;
482 (uintptr_t)addr);
485 addr -= PAGESIZE;
490 addr += PAGESIZE;
493 hat_reserve(seg->s_as, addr, size);
497 pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
500 addr += PAGESIZE;
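segkmem_fault() first rejects addresses outside the segment (source line 453), then forwards anything in the segkp-managed range (the BT_TEST at 460). For F_SOFTLOCK it takes a shared lock on each constituent page, and when a page is missing with no kernel mapping behind it, it walks backward unlocking whatever it had already pinned; F_SOFTUNLOCK is the mirror image. A sketch of the softlock path, with loop-control details and the FC_NOMAP error code assumed (pg must be signed for the backout loop):

    for (pg = 0; pg < npages; pg++) {
            pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_SHARED);
            if (pp == NULL && !hat_probe(kas.a_hat, addr)) {
                    /* no page and no mapping: unpin what we took */
                    addr -= PAGESIZE;
                    while (--pg >= 0) {
                            pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
                            if (pp)
                                    page_unlock(pp);
                            addr -= PAGESIZE;
                    }
                    return (FC_NOMAP);      /* assumption: error code */
            }
            addr += PAGESIZE;
    }
    if (rw == S_OTHER)
            hat_reserve(seg->s_as, addr, size);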
510 segkmem_setprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
515 addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
522 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
523 return (SEGOP_SETPROT(segkp, addr, size, prot));
526 hat_unload(kas.a_hat, addr, size, HAT_UNLOAD);
528 hat_chgprot(kas.a_hat, addr, size, prot);
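After the same bounds and segkp checks, the hits at source lines 526 and 528 suggest that segkmem_setprot() treats prot == 0 as a request to remove the translations outright rather than merely changing permissions:

    if (prot == 0)
            hat_unload(kas.a_hat, addr, size, HAT_UNLOAD);
    else
            hat_chgprot(kas.a_hat, addr, size, prot);
    return (0);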
538 segkmem_checkprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
549 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
550 return (SEGOP_CHECKPROT(segkp, addr, size, prot));
562 segkmem_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
573 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
574 return (SEGOP_KLUSTER(segkp, addr, delta));
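checkprot and kluster show the delegation template that nearly every segment op in this listing repeats: if the page's bit is set in segkp_bitmap, the address really belongs to segkp and the operation is forwarded verbatim. Schematically (only the BT_TEST itself appears in the hits; the guard in front of it is assumed):

    /* recurring segkp redirect in these segment ops */
    if (segkp_bitmap && seg == &kvseg &&
        BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
            return (SEGOP_CHECKPROT(segkp, addr, size, prot));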
584 caddr_t addr = start;
585 caddr_t addr_end = addr + size;
587 while (addr < addr_end) {
588 pfn_t pfn = hat_getpfnum(kas.a_hat, addr);
590 dump_addpage(as, addr, pfn);
591 addr += PAGESIZE;
599 caddr_t addr = start;
600 caddr_t addr_end = addr + size;
608 addr == heap_lp_base && addr_end == heap_lp_end &&
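The dump callback walks its range a page at a time and reports each live translation to dump_addpage(); the comparison against heap_lp_base/heap_lp_end at source line 608 special-cases the large-page heap, which is sparse and is evidently walked through its vmem arena instead. A sketch of the plain path, with the PFN validity check and the meaning of arg assumed:

    struct as *as = arg;        /* assumption: arg is the dumped address space */
    caddr_t addr = start;
    caddr_t addr_end = addr + size;

    while (addr < addr_end) {
            pfn_t pfn = hat_getpfnum(kas.a_hat, addr);
            if (pfn != PFN_INVALID)     /* assumption: skip unmapped pages */
                    dump_addpage(as, addr, pfn);
            addr += PAGESIZE;
    }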
671 * lock/unlock kmem pages over a given range [addr, addr+len).
679 segkmem_pagelock(struct seg *seg, caddr_t addr, size_t len,
694 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
695 return (SEGOP_PAGELOCK(segkp, addr, len, ppp, type, rw));
721 pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_SHARED);
730 addr += PAGESIZE;
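segkmem_pagelock() implements L_PAGELOCK by building an array of shared-locked page_t pointers for the caller, one per page of [addr, addr + len). A sketch with the array allocation, error codes, and unwind-on-failure assumed (pg must be signed for the unwind loop):

    npages = btopr(len);
    pplist = kmem_alloc(npages * sizeof (page_t *), KM_NOSLEEP);
    if (pplist == NULL)
            return (ENOMEM);            /* assumption: failure handling */

    for (pg = 0; pg < npages; pg++) {
            pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_SHARED);
            if (pp == NULL) {
                    /* unlock what we took so far and fail */
                    while (--pg >= 0)
                            page_unlock(pplist[pg]);
                    kmem_free(pplist, npages * sizeof (page_t *));
                    return (EFAULT);    /* assumption: error code */
            }
            pplist[pg] = pp;
            addr += PAGESIZE;
    }
    *ppp = pplist;
    return (0);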
743 segkmem_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
754 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
755 return (SEGOP_GETMEMID(segkp, addr, memidp));
763 segkmem_getpolicy(struct seg *seg, caddr_t addr)
826 segkmem_page_create(void *addr, size_t size, int vmflag, void *arg)
851 return (page_create_va(vp, (u_offset_t)(uintptr_t)addr, size,
852 pgflags, &kseg, addr));
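segkmem_page_create() is the default backing-page constructor handed to segkmem_xalloc(). The listing shows only the tail call into page_create_va(), with the allocation's own VA used both as the vnode offset and as the color hint; the body above it presumably translates vmem VM_* flags into page_create PG_* flags. A sketch of that translation, with the exact flag pairs assumed:

    pgflags = PG_EXCL;
    if (vmflag & VM_NORELOC)            /* assumption: flag mapping */
            pgflags |= PG_NORELOC;
    if ((vmflag & VM_NOSLEEP) == 0)
            pgflags |= PG_WAIT;
    if (vmflag & VM_PANIC)
            pgflags |= PG_PANIC;

    return (page_create_va(vp, (u_offset_t)(uintptr_t)addr, size,
        pgflags, &kseg, addr));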
856 * Allocate pages to back the virtual address range [addr, addr + size).
857 * If addr is NULL, allocate the virtual address space as well.
864 caddr_t addr = inaddr;
868 if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
871 ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
875 vmem_free(vmp, addr, size);
879 ppl = page_create_func(addr, size, vmflag, pcarg);
882 vmem_free(vmp, addr, size);
926 return (addr);
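segkmem_xalloc() is the central allocator: take VA from the arena unless the caller brought its own inaddr, have page_create_func produce the backing pages, load kernel translations, and hand the address back. The two vmem_free hits at source lines 875 and 882 are separate failure paths, both of which return the VA only if it was allocated here. The skeleton, with the per-page mapping loop elided:

    caddr_t addr = inaddr;

    if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
            return (NULL);

    ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);

    ppl = page_create_func(addr, size, vmflag, pcarg);
    if (ppl == NULL) {
            if (inaddr == NULL)
                    vmem_free(vmp, addr, size);
            return (NULL);
    }

    /* ... lock each page down and hat_memload() it into kas ... */

    return (addr);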
932 void *addr;
956 addr = vmem_alloc(vmp, size, vmflag | VM_PANIC);
957 if (boot_alloc(addr, size, BO_NO_ALIGN) != addr)
959 return (addr);
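Source lines 956-959 are the early-boot fallback: before the VM system is fully up, segkmem still carves VA out of the arena but asks the boot allocator to back it. VM_PANIC makes the vmem_alloc() itself infallible, so the only checked failure is boot_alloc():

    addr = vmem_alloc(vmp, size, vmflag | VM_PANIC);
    if (boot_alloc(addr, size, BO_NO_ALIGN) != addr)
            panic("segkmem_alloc: boot_alloc failed");  /* assumption: message */
    return (addr);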
988 caddr_t addr = inaddr;
992 ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
1004 hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);
1006 for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
1008 pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
1017 pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
1021 pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
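The free path inverts segkmem_xalloc(): unload the translations first (HAT_UNLOAD_UNLOCK also drops the load locks), then reclaim each page. The three different lookups in the hits (page_find at 1008, page_lookup at 1017 and 1021) handle pages in different lock states; a simplified sketch of the common case:

    hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

    for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
            pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
            if (pp == NULL)
                    panic("segkmem_free: page not found");
            /* exclusive lock held, so the page can be destroyed */
            page_destroy(pp, 0);
    }
    /* the VA itself goes back to the arena via vmem_free() afterwards */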
1073 segkmem_mapin(struct seg *seg, void *addr, size_t size, uint_t vprot,
1076 hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
1077 hat_devload(seg->s_as->a_hat, addr, size, pfn, vprot,
1082 segkmem_mapout(struct seg *seg, void *addr, size_t size)
1084 hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
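segkmem_mapin() and segkmem_mapout() are thin device-mapping helpers: mapin replaces whatever translation exists with a locked devload of the given pfn, and mapout simply unloads. The hit at source line 1077 is truncated after vprot; completing it with the flags argument it presumably passes:

    segkmem_mapin(struct seg *seg, void *addr, size_t size, uint_t vprot,
        ulong_t pfn, uint_t flags)
    {
            hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
            hat_devload(seg->s_as->a_hat, addr, size, pfn, vprot,
                flags | HAT_LOAD_LOCK);     /* assumption: locked load */
    }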
1094 kmem_freepages(void *addr, pgcnt_t npages)
1096 kmem_free(addr, ptob(npages));
1105 segkmem_page_create_large(void *addr, size_t size, int vmflag, void *arg)
1120 return (page_create_va_large(&kvp, (u_offset_t)(uintptr_t)addr, size,
1121 pgflags, &kvseg, addr, arg));
1126 * [addr, addr + size). If addr is NULL, allocate the virtual address
1134 caddr_t addr = inaddr, pa;
1158 if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
1161 ASSERT(((uintptr_t)addr & (lpsize - 1)) == 0);
1164 for (pa = addr, i = 0; i < nlpages; i++, pa += lpsize) {
1196 return (addr);
1210 vmem_free(vmp, addr, size);
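segkmem_xalloc_lp() is the large-page analogue of segkmem_xalloc(): VA from the arena if the caller did not supply it, alignment asserted at the large-page size rather than PAGESIZE, then one iteration per large page, with the vmem_free at source line 1210 on the teardown path. A skeleton with the per-large-page work elided:

    caddr_t addr = inaddr, pa;

    if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
            return (NULL);

    ASSERT(((uintptr_t)addr & (lpsize - 1)) == 0);

    for (pa = addr, i = 0; i < nlpages; i++, pa += lpsize) {
            /* ... create and load one large page at pa; on failure,
             * tear down what was built and vmem_free(vmp, addr, size) ... */
    }

    return (addr);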
1222 segkmem_free_one_lp(caddr_t addr, size_t size)
1229 hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);
1231 for (; pgs_left > 0; addr += PAGESIZE, pgs_left--) {
1232 pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
1271 void *addr = NULL;
1347 addr = vmem_alloc(kmem_lp_arena, asize,
1359 if (addr != NULL) {
1362 return (addr);
1398 void *addr;
1409 addr = segkmem_xalloc_lp(vmp, NULL, size, vmflag, 0,
1411 return (addr);
1424 caddr_t addr = inaddr;
1429 ASSERT(IS_KMEM_VA_LARGEPAGE(addr));
1433 segkmem_free_one_lp(addr, lpsize);
1434 addr += lpsize;
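segkmem_free_lp() asserts the address really came from the large-page heap and then tears it down one large page at a time through segkmem_free_one_lp(), which (per the hits at 1229-1232) mirrors the small-page free: hat_unload first, then an exclusive page_lookup per constituent PAGESIZE page. The outer loop, with the iteration count assumed to be size divided by lpsize:

    ASSERT(IS_KMEM_VA_LARGEPAGE(addr));

    for (i = 0; i < nlpages; i++) {     /* assumption: nlpages = size / lpsize */
            segkmem_free_one_lp(addr, lpsize);
            addr += lpsize;
    }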
1544 void *addr;
1551 addr = vmem_xalloc(vmp, size, ppaquantum, 0, 0, NULL, NULL, vmflag);
1552 if (addr != NULL && segkmem_xalloc(vmp, addr, size, vmflag, 0,
1554 vmem_xfree(vmp, addr, size);
1555 addr = NULL;
1558 return (addr);
1562 segkmem_free_ppa(vmem_t *vmp, void *addr, size_t size)
1566 ASSERT(addr != NULL);
1569 segkmem_free(vmp, addr, size);
1571 segkmem_free(NULL, addr, size);
1572 vmem_xfree(vmp, addr, size);
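The ppa (page pointer array) arena needs allocations aligned to ppaquantum, so the alloc side splits VA reservation from backing: vmem_xalloc() for aligned VA, then segkmem_xalloc() to populate it, undoing the first step if the second fails. The free side at 1562-1572 takes the same two steps in reverse. Assembled from the hits, with the arguments truncated at source line 1552 assumed:

    addr = vmem_xalloc(vmp, size, ppaquantum, 0, 0, NULL, NULL, vmflag);
    if (addr != NULL && segkmem_xalloc(vmp, addr, size, vmflag, 0,
        segkmem_page_create, NULL) == NULL) {   /* assumption: trailing args */
            vmem_xfree(vmp, addr, size);
            addr = NULL;
    }
    return (addr);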
1583 void *addr;
1633 if ((addr = segkmem_alloc_lpi(heap_lp_arena,
1636 addr = vmem_add(kmem_lp_arena, addr,
1638 ASSERT(addr != NULL);
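The final hits look like the kmem_lp_arena grow path: a fresh large-page chunk is imported from heap_lp_arena via segkmem_alloc_lpi() and then donated to kmem_lp_arena with vmem_add(); the ASSERT reflects that adding an already-backed span is not expected to fail. Assembled from the hits, with the chunk-size name assumed:

    if ((addr = segkmem_alloc_lpi(heap_lp_arena,
        chunksize, vmflag)) != NULL) {          /* assumption: size argument */
            addr = vmem_add(kmem_lp_arena, addr, chunksize, vmflag);
            ASSERT(addr != NULL);
    }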