Lines Matching defs:addr in seg_vn.c (the segvn segment driver)

103 static int	segvn_unmap(struct seg *seg, caddr_t addr, size_t len);
106 caddr_t addr, size_t len, enum fault_type type,
108 static faultcode_t segvn_faulta(struct seg *seg, caddr_t addr);
109 static int segvn_setprot(struct seg *seg, caddr_t addr,
111 static int segvn_checkprot(struct seg *seg, caddr_t addr,
113 static int segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
115 static int segvn_sync(struct seg *seg, caddr_t addr, size_t len,
117 static size_t segvn_incore(struct seg *seg, caddr_t addr, size_t len,
119 static int segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
121 static int segvn_getprot(struct seg *seg, caddr_t addr, size_t len,
123 static u_offset_t segvn_getoffset(struct seg *seg, caddr_t addr);
124 static int segvn_gettype(struct seg *seg, caddr_t addr);
125 static int segvn_getvp(struct seg *seg, caddr_t addr,
127 static int segvn_advise(struct seg *seg, caddr_t addr, size_t len,
130 static int segvn_pagelock(struct seg *seg, caddr_t addr, size_t len,
132 static int segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len,
134 static int segvn_getmemid(struct seg *seg, caddr_t addr,
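The prototypes above are the segvn segment driver's entry points; in illumos they are gathered into a struct seg_ops vtable that the VM layer dispatches through. A minimal sketch of that wiring, assuming the member names from <vm/seg.h>; entries not in this prototype block (dup, free, swapout, dump, getpolicy, capable, inherit) are omitted:

    /*
     * Sketch only: the static methods above as a seg_ops table.
     * The real segvn_ops also fills the members omitted here.
     */
    static const struct seg_ops segvn_ops_sketch = {
        .unmap       = segvn_unmap,
        .fault       = segvn_fault,
        .faulta      = segvn_faulta,
        .setprot     = segvn_setprot,
        .checkprot   = segvn_checkprot,
        .kluster     = segvn_kluster,
        .sync        = segvn_sync,
        .incore      = segvn_incore,
        .lockop      = segvn_lockop,
        .getprot     = segvn_getprot,
        .getoffset   = segvn_getoffset,
        .gettype     = segvn_gettype,
        .getvp       = segvn_getvp,
        .advise      = segvn_advise,
        .pagelock    = segvn_pagelock,
        .setpagesize = segvn_setpagesize,
        .getmemid    = segvn_getmemid,
    };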
249 #define CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr) { \
251 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz); \
253 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)((addr) + \
258 lpgeaddr = lpgaddr = (addr); \
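CALC_LPG_REGION is wrapped across source lines, so the P2ROUNDUP continuation is cut off above. A sketch of the presumable full computation, assuming the branch is guarded by the segment's page-size code (s_szc) and that pgsz is a power of two: for a large-page segment the range [addr, addr + len) is widened outward to large-page boundaries, and for a small-page segment it degenerates to addr itself (file line 258 above). The names below are the macro's own parameters:

    /* Expanded sketch of the macro body; the ASSERTs are elided. */
    if (seg->s_szc != 0) {                      /* large-page segment */
        pgsz = page_get_pagesize(seg->s_szc);
        lpgaddr = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
        lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)(addr + len), pgsz);
    } else {
        lpgeaddr = lpgaddr = addr;              /* nothing to widen */
    }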
914 caddr_t addr;
935 for (anon_idx = anon_num, addr = seg->s_base;
936 addr < eaddr; addr += PAGESIZE, anon_idx++) {
948 pp = anon_zero(seg, addr, &ap, cred);
967 hat_memload(seg->s_as->a_hat, addr, pp,
1506 caddr_t addr;
1514 addr = seg->s_base;
1525 vpp = &svd->vpage[seg_page(seg, addr)];
1540 pp = anon_zero(newseg, addr, &newap,
1546 PAGESIZE, seg, addr, S_READ, svd->cred);
1550 pp = anon_private(&newap, newseg, addr, prot,
1560 addr += PAGESIZE;
1855 segvn_unmap(struct seg *seg, caddr_t addr, size_t len)
1909 if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
1910 (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) {
1918 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
1919 ASSERT(seg->s_base != addr || seg->s_size != len);
1941 err = segvn_demote_range(seg, addr, len, SDR_END, 0);
1954 (offset_t)svd->offset + (uintptr_t)(addr - seg->s_base),
1955 seg->s_as, addr, len, svd->prot, svd->maxprot,
1968 (void) segvn_lockop(seg, addr, len, 0, MC_UNLOCK, NULL, 0);
1997 hat_unload_callback(seg->s_as->a_hat, addr, len,
2010 if (addr == seg->s_base && len == seg->s_size) {
2024 if (addr == seg->s_base) {
2130 if (addr + len == seg->s_base + seg->s_size) {
2236 nbase = addr + len; /* new seg base */
2238 seg->s_size = addr - seg->s_base; /* shrink old seg */
2299 opages = btop((uintptr_t)(addr - seg->s_base));
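Taken together, the tests at file lines 2010, 2024, 2130, and 2236-2238 above are segvn_unmap's four-way case analysis. A hedged sketch of the dispatch, with the bookkeeping reduced to comments:

    /* Sketch: how does [addr, addr + len) overlap the segment? */
    if (addr == seg->s_base && len == seg->s_size) {
        /* whole segment: free everything and the seg itself */
    } else if (addr == seg->s_base) {
        /* front trim: advance s_base, shrink s_size */
    } else if (addr + len == seg->s_base + seg->s_size) {
        /* back trim: shrink s_size only */
    } else {
        /* hole in the middle: shrink the old seg to [s_base, addr)
         * and create a new seg at nbase = addr + len for the rest */
    }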
2560 * Caller must always match addr and len of a softunlock with a previous
2561 * softlock with exactly the same addr and len.
2564 segvn_softunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw)
2579 anon_index = svd->anon_index + seg_page(seg, addr);
2583 hat_unlock_region(seg->s_as->a_hat, addr, len, svd->rcookie);
2585 hat_unlock(seg->s_as->a_hat, addr, len);
2587 for (adr = addr; adr < addr + len; adr += PAGESIZE) {
2612 "segvn_softunlock: addr %p, ap %p, vp %p, off %llx",
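The contract at file lines 2560-2561 above is load-bearing: a softlock is torn down by a softunlock over exactly the same range, never a subset or superset. A hypothetical caller sketch, going through the segvn_fault entry point (F_SOFTLOCK and F_SOFTUNLOCK are the stock enum fault_type values):

    /* Sketch: pair every F_SOFTLOCK with an identical F_SOFTUNLOCK. */
    if (segvn_fault(hat, seg, addr, len, F_SOFTLOCK, rw) == 0) {
        /* ... access the pages while they are held ... */
        (void) segvn_fault(hat, seg, addr, len, F_SOFTUNLOCK, rw);
    }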
2703 caddr_t addr, /* address in as */
2773 * allocating separate anon slots for the same "addr".
2778 anon_index = svd->anon_index + seg_page(seg, addr);
2800 if ((pp = anon_zero(seg, addr, &ap,
2818 page_migrate(seg, addr, &pp, 1);
2880 hat_memload(hat, addr, pp, prot, hat_flag);
2898 seg, addr, rw, svd->cred);
2977 page_migrate(seg, addr, &opp, 1);
2990 hat_memload_region(hat, addr, opp, prot & vpprot, hat_flag,
3038 hat_unload(seg->s_as->a_hat, addr, PAGESIZE,
3079 pp = anon_private(&ap, seg, addr, prot, opp, pageflags, svd->cred);
3103 page_migrate(seg, addr, &pp, 1);
3116 hat_memload(hat, addr, pp, prot, hat_flag);
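The comment at file line 2773 above is about racing faults: two threads faulting the same unbacked address must not each allocate an anon slot for it. A hedged sketch of the serialization, using the stock anon-layer helpers (anon_array_enter/anon_array_exit, anon_get_ptr, anon_zero):

    /* Sketch: only the first faulter on this slot allocates it. */
    anon_sync_obj_t cookie;

    anon_array_enter(amp, anon_index, &cookie);
    if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL)
        pp = anon_zero(seg, addr, &ap, cred);   /* fresh zeroed page */
    anon_array_exit(&cookie);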
3825 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr,
3955 MAX(a, addr),
4567 if (a < addr) {
4575 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
4634 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr,
4867 if (a < addr) {
4875 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
4922 segvn_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
4958 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
5090 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr,
5163 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
5166 lpgeaddr, type, rw, addr, addr + len, brkcow);
5169 lpgeaddr, type, rw, addr, addr + len, brkcow);
5181 page = seg_page(seg, addr);
5202 for (a = addr; a < addr + len; a += PAGESIZE, index++) {
5211 hat_chgprot(seg->s_as->a_hat, addr, len, svd->prot);
5224 off = svd->offset + (uintptr_t)(addr - seg->s_base);
5383 (((size_t)(addr + PAGESIZE) <
5385 hat_probe(as->a_hat, addr + PAGESIZE))) {
5400 ((seg->s_base + seg->s_size) - addr));
5401 ASSERT((addr + plsz) <=
5424 "segvn_getpage:seg %p addr %p vp %p",
5425 seg, addr, vp);
5427 &vpprot, plp, plsz, seg, addr + (vp_off - off), arw,
5449 * the same "addr".
5454 * on the same "addr" in the same address space.
5484 for (a = addr; a < addr + len; a += PAGESIZE, off += PAGESIZE) {
5490 if (type == F_SOFTLOCK && a > addr) {
5491 segvn_softunlock(seg, addr, (a - addr),
5503 page = seg_page(seg, addr);
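File lines 5490-5491 above show the error-unwind idiom for multi-page softlocks: if the loop fails partway through, only the pages already locked, addr up to but not including a, are released before the error is returned. A sketch with the per-page work stubbed out (lock_one_page is hypothetical):

    /* Sketch: unwind a partial softlock before reporting failure. */
    for (a = addr; a < addr + len; a += PAGESIZE) {
        if (lock_one_page(seg, a) != 0) {       /* hypothetical step */
            if (type == F_SOFTLOCK && a > addr)
                segvn_softunlock(seg, addr, a - addr, S_OTHER);
            return (FC_MAKE_ERR(EFAULT));
        }
    }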
5615 segvn_faulta(struct seg *seg, caddr_t addr)
5635 svd->anon_index + seg_page(seg, addr))) != NULL) {
5638 0, seg, addr, S_READ, svd->cred);
5656 "segvn_getpage:seg %p addr %p vp %p", seg, addr, vp);
5658 (offset_t)(svd->offset + (uintptr_t)(addr - seg->s_base)),
5659 PAGESIZE, NULL, NULL, 0, seg, addr,
5669 segvn_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
5748 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
5750 ASSERT(seg->s_base != addr || seg->s_size != len);
5760 err = segvn_demote_range(seg, addr, len,
5766 err = segvn_demote_range(seg, addr, len,
5800 if (addr == seg->s_base &&
5818 svp = &svd->vpage[seg_page(seg, addr)];
5820 addr + len)];
5876 svd->amp == NULL && addr == seg->s_base &&
5888 if (addr == seg->s_base && len == seg->s_size && svd->vpage == NULL) {
5917 anon_idx = svd->anon_index + seg_page(seg, addr);
5923 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
5924 evp = &svd->vpage[seg_page(seg, addr + len)];
5930 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) {
6008 len = (svp - &svd->vpage[seg_page(seg, addr)]) *
6012 hat_unload(seg->s_as->a_hat, addr,
6024 evp = &svd->vpage[seg_page(seg, addr + len)];
6025 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) {
6048 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
6057 hat_chgattr(seg->s_as->a_hat, addr, len, prot);
6070 segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
6076 caddr_t eaddr = addr + len, a;
6080 u_offset_t off = svd->offset + (uintptr_t)(addr - seg->s_base);
6083 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
6090 * addr should always be pgsz aligned but eaddr may be misaligned if
6096 if (!IS_P2ALIGNED(addr, pgsz) ||
6105 ulong_t an_idx = svd->anon_index + seg_page(seg, addr);
6125 map_addr_vacalign_check(addr, off)) {
6134 for (a = addr; a < eaddr; a += pgsz) {
6193 if (addr != seg->s_base || eaddr != (seg->s_base + seg->s_size)) {
6196 err = segvn_demote_range(seg, addr, len, SDR_RANGE, 0);
6205 if (addr != seg->s_base) {
6206 nseg = segvn_split_seg(seg, addr);
6479 caddr_t addr = ((uintptr_t)(svp - svd->vpage) << PAGESHIFT)
6494 ASSERT(sameprot(seg, addr, pgcnt << PAGESHIFT));
6575 segvn_split_seg(struct seg *seg, caddr_t addr)
6585 ASSERT(addr >= seg->s_base);
6586 ASSERT(addr <= seg->s_base + seg->s_size);
6589 if (addr == seg->s_base || addr == seg->s_base + seg->s_size)
6592 nsize = seg->s_base + seg->s_size - addr;
6593 seg->s_size = addr - seg->s_base;
6594 nseg = seg_alloc(seg->s_as, addr, nsize);
6714 * or the ends (SDR_END) by addr/len.
6721 caddr_t addr,
6726 caddr_t eaddr = addr + len;
6741 ASSERT(seg->s_base != addr || seg->s_size != len);
6742 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
6747 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
6748 ASSERT(flag == SDR_RANGE || eaddr < lpgeaddr || addr > lpgaddr);
6755 } else if (addr != lpgaddr) {
6799 caddr_t ta = MAX(addr, badseg1->s_base);
6846 ASSERT(badseg2->s_base > addr);
6861 segvn_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
6883 evp = &svd->vpage[seg_page(seg, addr + len)];
6884 for (vp = &svd->vpage[seg_page(seg, addr)]; vp < evp; vp++) {
6895 segvn_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
6898 size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
6909 size_t pgoff = seg_page(seg, addr);
6922 segvn_getoffset(struct seg *seg, caddr_t addr)
6928 return (svd->offset + (uintptr_t)(addr - seg->s_base));
6933 segvn_gettype(struct seg *seg, caddr_t addr)
6945 segvn_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
6957 * addr + delta relative to the mapping at addr. We assume here
6966 segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
6980 if (addr + delta < seg->s_base ||
6981 addr + delta >= (seg->s_base + seg->s_size))
6985 page = seg_page(seg, addr);
6988 * Check to see if either of the pages addr or addr + delta
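The segvn_kluster excerpt above (file lines 6957-6988) decides whether the page at addr + delta may be read in together with the one at addr. A sketch of the bounds check visible at file lines 6980-6981:

    /* Sketch: a kluster candidate must stay inside the segment. */
    if (addr + delta < seg->s_base ||
        addr + delta >= seg->s_base + seg->s_size)
        return (-1);    /* out of range; don't kluster */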
7242 segvn_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
7309 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7333 vpp = &svd->vpage[seg_page(seg, addr)];
7349 anon_index = svd->anon_index + seg_page(seg, addr);
7351 for (eaddr = addr + len; addr < eaddr; addr += PAGESIZE) {
7468 segvn_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
7492 p = seg_page(seg, addr);
7493 ep = seg_page(seg, addr + len);
7497 for (; p < ep; p++, addr += PAGESIZE) {
7521 if ((hat_getattr(seg->s_as->a_hat, addr,
7541 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7629 segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
7737 anon_index = svd->anon_index + seg_page(seg, addr);
7740 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7741 evp = &svd->vpage[seg_page(seg, addr + len)];
7750 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp;
7763 i_edx = svd->anon_index + seg_page(seg, addr + len);
7803 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp;
7804 vpp++, pos++, addr += PAGESIZE, offset += PAGESIZE, anon_index++) {
7825 pp = anon_zero(seg, addr, &ap,
7890 (uint_t *)NULL, pl, PAGESIZE, seg, addr,
7915 * the failure as an error, unless the addr
8063 segvn_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
8186 page = seg_page(seg, addr);
8224 if ((addr == seg->s_base) && (len == seg->s_size)) {
8263 page_mark_migrate(seg, addr, len, amp, svd->anon_index,
8284 addr + len == next->s_base)
8288 addr == prev->s_base + prev->s_size) {
8310 hat_unload(seg->s_as->a_hat, addr, len,
8333 page = seg_page(seg, addr);
8360 addr = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
8370 off = svd->offset + (uintptr_t)(addr - seg->s_base);
8402 page_mark_migrate(seg, addr, len, amp, svd->anon_index,
8427 eaddr = addr + len;
8429 if (addr > seg->s_base) {
8441 new_seg = segvn_split_seg(seg, addr);
8516 if (addr == seg->s_base) {
8528 addr == prev->s_base +
8544 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
8573 segvn_inherit(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
8611 if ((addr == seg->s_base) && (len == seg->s_size)) {
8630 page = seg_page(seg, addr);
8707 caddr_t addr;
8713 addr = seg->s_base;
8744 dump_addpage(seg->s_as, addr, pfn);
8748 addr += PAGESIZE;
8806 segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp,
8847 "segvn_pagelock: start seg %p addr %p", seg, addr);
8900 * as_pagelock() call's addr and len.
8908 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
8909 adjustpages = btop((uintptr_t)(addr - lpgaddr));
8911 lpgaddr = addr;
8912 lpgeaddr = addr + len;
8946 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)addr,
8953 ulong_t aix = svd->anon_index + seg_page(seg, addr);
8959 lpgaddr = addr - ptob(aix - aaix);
8963 if (svd->pageprot && lpgaddr != addr) {
8965 struct vpage *evp = &svd->vpage[seg_page(seg, addr)];
8973 lpgaddr = addr;
8977 lpgeaddr = addr + len;
9000 if (svd->pageprot && lpgeaddr != addr + len) {
9004 vp = &svd->vpage[seg_page(seg, addr + len)];
9014 lpgeaddr = addr + len;
9017 adjustpages = btop((uintptr_t)(addr - lpgaddr));
9047 for (a = addr; a < addr + len; a += PAGESIZE) {
9072 paddr = (caddr_t)((addr - seg->s_base) +
9076 paddr = addr;
9138 "segvn_pagelock: unlock seg %p addr %p", seg, addr);
9168 a = addr;
9169 ea = addr + len;
9205 "segvn_pagelock: cache hit seg %p addr %p", seg, addr);
9266 addr = lpgaddr;
9291 page = seg_page(seg, addr);
9298 for (a = addr; a < addr + len; a += PAGESIZE, anon_index++) {
9311 if (a == addr || P2PHASE(anon_index, anpgcnt) == 0) {
9381 if (a >= addr + len) {
9401 "segvn_pagelock: cache fill seg %p addr %p", seg, addr);
9406 np = ((uintptr_t)(a - addr)) >> PAGESHIFT;
9418 "segvn_pagelock: cache miss seg %p addr %p", seg, addr);
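In the pagelock excerpt above, adjustpages (file lines 8909 and 9017) records how many pages the range grew at the front when it was widened to large-page boundaries. A hedged sketch of what that buys the caller, assuming the page list is handed back through ppp as the prototype at file line 8806 suggests:

    /* Sketch: return a page list whose first entry backs addr. */
    adjustpages = btop((uintptr_t)(addr - lpgaddr)); /* lead pages */
    *ppp = pplist + adjustpages;   /* skip pages for [lpgaddr, addr) */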
9458 segvn_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
9535 shamp_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
9580 * get a memory ID for an addr in a given segment
9586 segvn_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
9596 memidp->val[1] = (uintptr_t)addr;
9604 (uintptr_t)(addr - seg->s_base);
9611 seg_page(seg, addr);
9623 pp = anon_zero(seg, addr, &ap, svd->cred);
9640 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
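The segvn_getmemid excerpt above fills a memid_t two ways: an offset into the backing object, or, for anonymous memory, the anon slot plus only the sub-page offset (addr & PAGEOFFSET, file line 9640). A minimal sketch of that shape; memid_sketch and its obj/is_anon parameters are hypothetical stand-ins:

    /* Sketch: a memid = backing-object identity + position in it. */
    static void
    memid_sketch(memid_t *memidp, void *obj, caddr_t addr,
        u_offset_t off, int is_anon)
    {
        memidp->val[0] = (uintptr_t)obj;        /* vnode or anon slot */
        memidp->val[1] = is_anon ?
            ((uintptr_t)addr & PAGEOFFSET) :    /* sub-page offset */
            (u_longlong_t)off;                  /* offset in object */
    }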
9676 segvn_getpolicy(struct seg *seg, caddr_t addr)
9704 anon_index = svn_data->anon_index + seg_page(seg, addr);
9706 vn_off = svn_data->offset + (uintptr_t)(addr - seg->s_base);