Lines matching defs:hmeblkp — symbol cross-reference for the sun4 HAT layer (hat_sfmmu.c); each hit below is a source line number followed by the matching line, with short annotated sketches at the boundaries between functions.

113 #define	SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid)		\
119 int _ttesz = get_hblk_ttesz(hmeblkp); \
128 _hsva = (caddr_t)get_hblk_base(hmeblkp); \
129 _heva = get_hblk_endaddr(hmeblkp); \
146 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid)
852 #define SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, ismhat) \
854 int ttesz = get_hblk_ttesz(hmeblkp); \
859 (caddr_t)get_hblk_base(hmeblkp); \
860 caddr_t eva = sva + get_hblk_span(hmeblkp); \
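Note: the SFMMU_UNLOAD_TSB hits above (854-860), like the get_hblk_base/get_hblk_endaddr pairs throughout this listing (128-129, 2032-2033, 3683-3685), derive an hmeblk's virtual range from its TTE size: an 8K hmeblk spans NHMENTS base pages, a large-page hmeblk spans one page of its size. A minimal standalone sketch of that arithmetic; the constant values and the shift-by-3 page-size step are assumptions modeled on sun4, the real definitions live in the machine headers:

#include <stdint.h>
#include <stdio.h>

#define MMU_PAGESHIFT   13              /* assumed: 8K base page */
#define NHMENTS         8               /* assumed: hments per 8K hmeblk */
#define TTE8K           0               /* assumed page-size encoding */

/* Bytes mapped by one page of a given TTE size (each size is 8x larger). */
static uintptr_t
tte_bytes(int ttesz)
{
        return ((uintptr_t)1 << (MMU_PAGESHIFT + 3 * ttesz));
}

/* Span of a whole hmeblk: 8K blocks carry NHMENTS pages, others one. */
static uintptr_t
hblk_span(int ttesz)
{
        return (ttesz == TTE8K ? NHMENTS * tte_bytes(TTE8K) : tte_bytes(ttesz));
}

int
main(void)
{
        uintptr_t base = 0x10000;       /* stand-in for get_hblk_base() */

        /* eva = sva + span, as in the hit at 860. */
        printf("8K hmeblk: [%#lx, %#lx)\n",
            (unsigned long)base, (unsigned long)(base + hblk_span(TTE8K)));
        return (0);
}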
991 #define HBLKTOHME(hment, hmeblkp, addr) \
994 HBLKTOHME_IDX(hment, hmeblkp, addr, index) \
998 * Version of HBLKTOHME that also returns the index in hmeblkp
1001 #define HBLKTOHME_IDX(hment, hmeblkp, addr, idx) \
1003 ASSERT(in_hblk_range((hmeblkp), (addr))); \
1005 if (get_hblk_ttesz(hmeblkp) == TTE8K) { \
1010 (hment) = &(hmeblkp)->hblk_hme[idx]; \
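Note: the HBLKTOHME_IDX hits (1003-1010) show the slot calculation: only 8K hmeblks hold multiple hments, indexed by the 8K page number within the block; every other size keeps a single hment at slot 0. The same logic as a sketch, with MMU_PAGESHIFT and NHMENTS again assumed:

#include <stdint.h>

#define MMU_PAGESHIFT   13      /* assumed */
#define NHMENTS         8       /* assumed */

/* Map a virtual address to its hment slot within an hmeblk. */
static int
hblk_hme_index(int is_tte8k, uintptr_t addr)
{
        if (is_tte8k)
                return ((int)((addr >> MMU_PAGESHIFT) & (NHMENTS - 1)));
        return (0);             /* large-page hmeblks keep one hment */
}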
1992 struct hme_blk *hmeblkp;
2024 hmeblkp = hmebp->hmeblkp;
2026 while (hmeblkp) {
2028 if ((hmeblkp->hblk_tag.htag_id == sfmmup) &&
2029 !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) {
2030 ASSERT(!hmeblkp->hblk_shared);
2031 (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
2032 (caddr_t)get_hblk_base(hmeblkp),
2033 get_hblk_endaddr(hmeblkp),
2036 nx_hblk = hmeblkp->hblk_next;
2037 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
2038 ASSERT(!hmeblkp->hblk_lckcnt);
2039 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
2042 pr_hblk = hmeblkp;
2044 hmeblkp = nx_hblk;
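Note: the loop above (2024-2044) is the recurring reap pattern in this file; the same shape reappears at 3717-3750, 5480-5551, and 8971-9008. It walks a hash bucket's singly linked chain, unloads blocks that belong to the hat, and unlinks the ones that end up empty, carrying a trailing pr_hblk pointer so removal never needs a second walk. A generic sketch, with a simplified node standing in for struct hme_blk:

#include <stddef.h>

struct blk {                    /* stand-in for struct hme_blk */
        struct blk *next;
        int vcnt, hmecnt;       /* valid-mapping and hment counts */
};

/* Unlink every empty block, keeping a trailing predecessor pointer. */
static void
bucket_reap(struct blk **headp)
{
        struct blk *blkp = *headp, *prev = NULL, *next;

        while (blkp != NULL) {
                next = blkp->next;      /* grab before any unlink */
                if (blkp->vcnt == 0 && blkp->hmecnt == 0) {
                        if (prev == NULL)
                                *headp = next;
                        else
                                prev->next = next;
                        /* the real code queues the block for later freeing */
                } else {
                        prev = blkp;
                }
                blkp = next;
        }
}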
2563 struct hme_blk *hmeblkp;
2577 hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr,
2579 ASSERT(hmeblkp);
2591 (void) sfmmu_tteload_addentry(hat, hmeblkp, &tte,
2880 struct hme_blk *hmeblkp;
2899 hmeblkp = sfmmu_tteload_find_hmeblk(sfmmup, hmebp, vaddr, size, flags,
2901 ASSERT(hmeblkp);
2906 ret = sfmmu_tteload_addentry(sfmmup, hmeblkp, ttep, vaddr, pps, flags,
2950 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
2963 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
2970 if (hmeblkp == (struct hme_blk *)hblk_reserve &&
2980 if (hmeblkp == NULL) {
2981 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
2983 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
2984 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
2992 if (get_hblk_ttesz(hmeblkp) != size) {
2993 ASSERT(!hmeblkp->hblk_vcnt);
2994 ASSERT(!hmeblkp->hblk_hmecnt);
2995 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
2999 if (hmeblkp->hblk_shw_bit) {
3004 ASSERT(!hmeblkp->hblk_shared);
3005 if (hmeblkp->hblk_shw_mask) {
3006 sfmmu_shadow_hcleanup(sfmmup, hmeblkp, hmebp);
3010 hmeblkp->hblk_shw_bit = 0;
3024 ASSERT(get_hblk_ttesz(hmeblkp) == size);
3025 ASSERT(!hmeblkp->hblk_shw_bit);
3026 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
3027 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
3028 ASSERT(hmeblkp->hblk_tag.htag_rid == rid);
3030 return (hmeblkp);
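Note: 2963-3030 show sfmmu_tteload_find_hmeblk's find-or-allocate protocol: search the bucket; on a miss, allocate a fresh block; on a hit whose TTE size does not match, unlink it (the asserts at 2993-2994 demand it is already empty) and fall back to allocation. Reduced to a sketch; the lookup, alloc, and unlink helpers are hypothetical stand-ins:

#include <stddef.h>

struct blk { int size, vcnt, hmecnt; };

/* Hypothetical stand-ins for the hash search / alloc / unlink steps. */
struct blk *bucket_lookup(unsigned long tag);
struct blk *blk_alloc(int size);
void bucket_unlink(struct blk *blkp);

static struct blk *
find_or_alloc(unsigned long tag, int size)
{
        struct blk *blkp = bucket_lookup(tag);

        /* Stale block of another page size: must be empty, so drop it. */
        if (blkp != NULL && blkp->size != size &&
            blkp->vcnt == 0 && blkp->hmecnt == 0) {
                bucket_unlink(blkp);
                blkp = NULL;
        }
        if (blkp == NULL)
                blkp = blk_alloc(size);
        return (blkp);
}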
3038 sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep,
3111 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
3112 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
3114 HBLKTOHME_IDX(sfhme, hmeblkp, vaddr, hmenum);
3146 panic("sfmmu_tteload - tte remap, hmeblkp 0x%p",
3147 (void *)hmeblkp);
3194 if ((hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) {
3196 (void *)hmeblkp);
3198 atomic_inc_32(&hmeblkp->hblk_lckcnt);
3200 HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK);
3226 chk_tte(&orig_old, &tteold, ttep, hmeblkp);
3233 atomic_inc_16(&hmeblkp->hblk_vcnt);
3316 if (hmeblkp->hblk_shared) {
3318 sfmmup->sfmmu_srdp->srd_hmergnp[rid], hmeblkp, 1);
3322 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 0);
3352 atomic_inc_16(&hmeblkp->hblk_hmecnt);
3353 ASSERT(hmeblkp->hblk_hmecnt > 0);
3356 * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
3594 struct hme_blk *hmeblkp;
3620 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
3621 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
3622 if (hmeblkp == NULL) {
3623 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
3626 ASSERT(hmeblkp);
3627 if (!hmeblkp->hblk_shw_mask) {
3633 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt);
3634 hmeblkp->hblk_shw_bit = 1;
3635 } else if (hmeblkp->hblk_shw_bit == 0) {
3636 panic("sfmmu_shadow_hcreate: shw bit not set in hmeblkp 0x%p",
3637 (void *)hmeblkp);
3639 ASSERT(hmeblkp->hblk_shw_bit == 1);
3640 ASSERT(!hmeblkp->hblk_shared);
3647 shw_mask = hmeblkp->hblk_shw_mask;
3649 newshw_mask = atomic_cas_32(&hmeblkp->hblk_shw_mask, shw_mask,
3655 return (hmeblkp);
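Note: 3647-3649 are a compare-and-swap retry loop that sets a child's bit in the shadow block's hblk_shw_mask without taking a lock. The same idiom in C11 atomics; the bit-index parameter is an assumption, since the vshift computation does not appear in the hits:

#include <stdatomic.h>
#include <stdint.h>

/* Set one bit in a shared 32-bit mask, lock-free, retrying on races. */
static void
shw_mask_set(_Atomic uint32_t *maskp, int vshift)
{
        uint32_t old = atomic_load(maskp);

        /* compare_exchange reloads 'old' on failure, so just retry. */
        while (!atomic_compare_exchange_weak(maskp, &old,
            old | ((uint32_t)1 << vshift)))
                ;
}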
3668 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
3674 ASSERT(hmeblkp->hblk_shw_bit);
3675 ASSERT(!hmeblkp->hblk_shared);
3679 if (!hmeblkp->hblk_shw_mask) {
3680 hmeblkp->hblk_shw_bit = 0;
3683 addr = (caddr_t)get_hblk_base(hmeblkp);
3684 endaddr = get_hblk_endaddr(hmeblkp);
3685 size = get_hblk_ttesz(hmeblkp);
3702 struct hme_blk *hmeblkp;
3717 hmeblkp = hmebp->hmeblkp;
3719 while (hmeblkp) {
3720 if (HTAGS_EQ(hmeblkp->hblk_tag, hblktag)) {
3722 ASSERT(!hmeblkp->hblk_shared);
3723 if (hmeblkp->hblk_shw_bit) {
3724 if (hmeblkp->hblk_shw_mask) {
3727 hmeblkp, hmebp);
3730 hmeblkp->hblk_shw_bit = 0;
3743 nx_hblk = hmeblkp->hblk_next;
3744 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
3745 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3748 pr_hblk = hmeblkp;
3750 hmeblkp = nx_hblk;
3781 struct hme_blk *hmeblkp;
3796 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
3797 if (hmeblkp != NULL) {
3798 ASSERT(hmeblkp->hblk_shared);
3799 ASSERT(!hmeblkp->hblk_shw_bit);
3800 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
3803 ASSERT(!hmeblkp->hblk_lckcnt);
3804 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3829 struct hme_blk *hmeblkp;
3845 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
3846 if (hmeblkp != NULL) {
3847 ASSERT(hmeblkp->hblk_shared);
3848 ASSERT(!hmeblkp->hblk_lckcnt);
3849 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
3850 *eaddrp = sfmmu_hblk_unload(NULL, hmeblkp, addr,
3854 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt);
3855 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3938 struct hme_blk *hmeblkp, *list = NULL;
3964 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
3965 if (hmeblkp != NULL) {
3966 ASSERT(!hmeblkp->hblk_shared);
3974 if (hmeblkp->hblk_shw_bit) {
3977 addr = sfmmu_hblk_unlock(hmeblkp, addr,
4015 struct hme_blk *hmeblkp;
4060 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk,
4062 if (hmeblkp == NULL) {
4067 ASSERT(hmeblkp->hblk_shared);
4068 va = sfmmu_hblk_unlock(hmeblkp, va, eaddr);
4088 sfmmu_hblk_unlock(struct hme_blk *hmeblkp, caddr_t addr, caddr_t endaddr)
4094 ASSERT(in_hblk_range(hmeblkp, addr));
4095 ASSERT(hmeblkp->hblk_shw_bit == 0);
4097 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
4098 ttesz = get_hblk_ttesz(hmeblkp);
4100 HBLKTOHME(sfhme, hmeblkp, addr);
4114 if (hmeblkp->hblk_lckcnt == 0)
4121 ASSERT(hmeblkp->hblk_lckcnt > 0);
4122 atomic_dec_32(&hmeblkp->hblk_lckcnt);
4123 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
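Note: 4114-4123, together with the load side at 3194-3200, show the hblk_lckcnt discipline: increments panic at MAX_HBLK_LCKCNT rather than wrap, and decrements refuse a zero count. A sketch of that bounded counter; the limit value is an assumption, and the kernel uses atomic_inc_32/atomic_dec_32 rather than a plain variable:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_LCKCNT      0xfffff         /* assumed limit */

static uint32_t lckcnt;                 /* one per block in the real code */

static void
lck_inc(void)
{
        if (lckcnt + 1 >= MAX_LCKCNT) { /* refuse to saturate or wrap */
                fprintf(stderr, "too many locked mappings\n");
                abort();
        }
        lckcnt++;
}

static void
lck_dec(void)
{
        assert(lckcnt > 0);             /* unlock must pair with a lock */
        lckcnt--;
}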
4291 struct hme_blk *hmeblkp;
4335 for (hashno = TTE64K, hmeblkp = NULL;
4336 hmeblkp == NULL && hashno <= mmu_hashcnt;
4347 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
4349 if (hmeblkp == NULL)
4353 if (hmeblkp == NULL) {
4359 ASSERT(!hmeblkp->hblk_shared);
4361 HBLKTOHME(osfhmep, hmeblkp, saddr);
4375 baseaddr = (caddr_t)get_hblk_base(hmeblkp);
4520 struct hme_blk *hmeblkp;
4543 for (hashno = TTE64K, hmeblkp = NULL;
4544 hmeblkp == NULL && hashno <= mmu_hashcnt;
4555 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
4557 if (hmeblkp == NULL)
4561 if (hmeblkp == NULL)
4564 ASSERT(!hmeblkp->hblk_shared);
4566 HBLKTOHME(osfhmep, hmeblkp, saddr);
4839 struct hme_blk *hmeblkp, *list = NULL;
4869 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
4870 if (hmeblkp != NULL) {
4871 ASSERT(!hmeblkp->hblk_shared);
4876 if (hmeblkp->hblk_shw_bit) {
4883 hmeblkp, addr, endaddr, &dmr, attr, mode);
4929 sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
4943 ASSERT(in_hblk_range(hmeblkp, addr));
4944 ASSERT(hmeblkp->hblk_shw_bit == 0);
4945 ASSERT(!hmeblkp->hblk_shared);
4947 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
4948 ttesz = get_hblk_ttesz(hmeblkp);
4967 HBLKTOHME(sfhmep, hmeblkp, addr);
5040 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
5168 struct hme_blk *hmeblkp, *list = NULL;
5198 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
5199 if (hmeblkp != NULL) {
5200 ASSERT(!hmeblkp->hblk_shared);
5205 if (hmeblkp->hblk_shw_bit) {
5211 addr = sfmmu_hblk_chgprot(sfmmup, hmeblkp,
5257 sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5273 ASSERT(in_hblk_range(hmeblkp, addr));
5274 ASSERT(hmeblkp->hblk_shw_bit == 0);
5275 ASSERT(!hmeblkp->hblk_shared);
5278 if (get_hblk_ttesz(hmeblkp) != TTE8K &&
5279 (endaddr < get_hblk_endaddr(hmeblkp))) {
5284 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
5285 ttesz = get_hblk_ttesz(hmeblkp);
5293 HBLKTOHME(sfhmep, hmeblkp, addr);
5368 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
5452 struct hme_blk *hmeblkp;
5480 hmeblkp = hmebp->hmeblkp;
5482 while (hmeblkp) {
5483 nx_hblk = hmeblkp->hblk_next;
5489 if (hmeblkp->hblk_tag.htag_id != sfmmup ||
5490 hmeblkp->hblk_shw_bit ||
5491 (sa = (caddr_t)get_hblk_base(hmeblkp)) >= endaddr ||
5492 (ea = get_hblk_endaddr(hmeblkp)) <= startaddr) {
5493 pr_hblk = hmeblkp;
5497 ASSERT(!hmeblkp->hblk_shared);
5501 if (hmeblkp->hblk_vcnt != 0 ||
5502 hmeblkp->hblk_hmecnt != 0)
5503 (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
5511 !hmeblkp->hblk_vcnt &&
5512 !hmeblkp->hblk_hmecnt) {
5513 ASSERT(!hmeblkp->hblk_lckcnt);
5514 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
5517 pr_hblk = hmeblkp;
5551 hmeblkp = nx_hblk;
5593 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
5678 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
5679 if (hmeblkp == NULL) {
5722 ASSERT(hmeblkp);
5723 ASSERT(!hmeblkp->hblk_shared);
5724 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
5734 get_hblk_span(hmeblkp));
5737 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
5770 if (hmeblkp->hblk_shw_bit) {
5794 addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr,
5801 !hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
5802 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 0);
5910 sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5922 ASSERT(in_hblk_range(hmeblkp, addr));
5923 ASSERT(!hmeblkp->hblk_shw_bit);
5924 ASSERT(sfmmup != NULL || hmeblkp->hblk_shared);
5925 ASSERT(sfmmup == NULL || !hmeblkp->hblk_shared);
5926 ASSERT(dmrp == NULL || !hmeblkp->hblk_shared);
5929 if (get_hblk_ttesz(hmeblkp) != TTE8K &&
5930 (endaddr < get_hblk_endaddr(hmeblkp))) {
5935 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
5936 ttesz = get_hblk_ttesz(hmeblkp);
5947 HBLKTOHME(sfhmep, hmeblkp, addr);
6020 ASSERT(hmeblkp->hblk_lckcnt > 0);
6021 atomic_dec_32(&hmeblkp->hblk_lckcnt);
6022 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
6058 ASSERT(!hmeblkp->hblk_shared);
6059 sfmmu_tlb_demap(addr, sfmmup, hmeblkp,
6067 ASSERT(hmeblkp->hblk_hmecnt > 0);
6071 * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS);
6075 atomic_dec_16(&hmeblkp->hblk_hmecnt);
6078 ASSERT(hmeblkp->hblk_vcnt > 0);
6079 atomic_dec_16(&hmeblkp->hblk_vcnt);
6081 ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
6082 !hmeblkp->hblk_lckcnt);
6120 } else if (hmeblkp->hblk_hmecnt != 0) {
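Note: 5929-5936 (and the identical check at 5278-5285) encode an invariant worth calling out: a caller's range may end inside an 8K hmeblk, but never inside a large-page one, because a large page cannot be half unloaded; after the check the range is clamped to the block's end. Sketched:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Clamp [addr, endaddr) to the block; reject partial large-page ops. */
static uintptr_t
clamp_range(int is_tte8k, uintptr_t hblk_end, uintptr_t endaddr)
{
        if (!is_tte8k && endaddr < hblk_end) {
                /* Ending mid-block is only legal at 8K granularity. */
                fprintf(stderr, "partial unload of a large page\n");
                abort();
        }
        return (endaddr < hblk_end ? endaddr : hblk_end);
}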
6215 struct hme_blk *hmeblkp, *list = NULL;
6246 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
6247 if (hmeblkp != NULL) {
6248 ASSERT(!hmeblkp->hblk_shared);
6253 if (hmeblkp->hblk_shw_bit) {
6259 addr = sfmmu_hblk_sync(sfmmup, hmeblkp,
6294 sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
6304 ASSERT(hmeblkp->hblk_shw_bit == 0);
6305 ASSERT(!hmeblkp->hblk_shared);
6307 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
6309 ttesz = get_hblk_ttesz(hmeblkp);
6310 HBLKTOHME(sfhmep, hmeblkp, addr);
6346 hmeblkp, 0, 0);
6606 struct hme_blk *hmeblkp;
6653 hmeblkp = sfmmu_hmetohblk(sfhmep);
6655 sfmmup = hblktosfmmu(hmeblkp);
6657 ASSERT(!hmeblkp->hblk_shared);
6659 addr = tte_to_vaddr(hmeblkp, tte);
6666 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
7017 struct hme_blk *hmeblkp;
7059 hmeblkp = sfmmu_hmetohblk(sfhme);
7065 if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt &&
7066 hmeblkp->hblk_tag.htag_id == ksfmmup)
7142 struct hme_blk *hmeblkp;
7159 hmeblkp = sfmmu_hmetohblk(sfhme);
7164 sfmmup = hblktosfmmu(hmeblkp);
7165 ttesz = get_hblk_ttesz(hmeblkp);
7188 chk_tte(&orig_old, &tte, &ttemod, hmeblkp);
7197 addr = tte_to_vaddr(hmeblkp, tte);
7199 if (hmeblkp->hblk_shared) {
7201 uint_t rid = hmeblkp->hblk_tag.htag_rid;
7207 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
7208 cpuset = sfmmu_rgntlb_demap(addr, rgnp, hmeblkp, 1);
7243 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp,
7250 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
7263 * We can not make ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
7270 * we are done with hmeblkp so that this hmeblk won't be
7273 ASSERT(hmeblkp->hblk_hmecnt > 0);
7274 ASSERT(hmeblkp->hblk_vcnt > 0);
7275 atomic_dec_16(&hmeblkp->hblk_vcnt);
7276 atomic_dec_16(&hmeblkp->hblk_hmecnt);
7280 * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
7281 * !hmeblkp->hblk_lckcnt);
7365 struct hme_blk *hmeblkp;
7415 hmeblkp = sfmmu_hmetohblk(sfhme);
7421 if (hmeblkp->hblk_shared) {
7422 sf_srd_t *srdp = hblktosrd(hmeblkp);
7423 uint_t rid = hmeblkp->hblk_tag.htag_rid;
7429 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp,
7485 struct hme_blk *hmeblkp;
7503 hmeblkp = sfmmu_hmetohblk(sfhme);
7504 sfmmup = hblktosfmmu(hmeblkp);
7505 addr = tte_to_vaddr(hmeblkp, tte);
7521 if (hmeblkp->hblk_shared) {
7524 hmeblkp->hblk_tag.htag_rid;
7530 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
7533 rgnp, hmeblkp, 1);
7535 sfmmu_tlb_demap(addr, sfmmup, hmeblkp,
7541 sfmmu_ttesync(hmeblkp->hblk_shared ? NULL : sfmmup, addr,
7559 struct hme_blk *hmeblkp;
7574 hmeblkp = sfmmu_hmetohblk(sfhme);
7575 sfmmup = hblktosfmmu(hmeblkp);
7576 addr = tte_to_vaddr(hmeblkp, tte);
7592 if (hmeblkp->hblk_shared) {
7594 uint_t rid = hmeblkp->hblk_tag.htag_rid;
7600 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
7603 rgnp, hmeblkp, 1);
7605 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
7870 struct hme_blk *hmeblkp = NULL;
7929 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
7930 if (hmeblkp != NULL) {
7931 ASSERT(!hmeblkp->hblk_shared);
7932 HBLKTOHME(sfhmep, hmeblkp, vaddr);
7960 for (hmeblkp = hmebp->hmeblkp; hmeblkp != NULL;
7961 hmeblkp = hmeblkp->hblk_next) {
7967 if (!HTAGS_EQ_SHME(hmeblkp->hblk_tag, hblktag,
7971 ASSERT(hmeblkp->hblk_shared);
7972 rid = hmeblkp->hblk_tag.htag_rid;
7976 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
7977 HBLKTOHME(sfhmep, hmeblkp, sv_vaddr);
7983 get_hblk_ttesz(hmeblkp) > TTE8K) {
7984 caddr_t eva = tte_to_evaddr(hmeblkp, ttep);
8000 } else if (get_hblk_ttesz(hmeblkp) > TTE8K ||
8086 struct hme_blk *hmeblkp;
8113 hmeblkp = sfmmu_hmetohblk(sfhme);
8118 if (hmeblkp->hblk_shared) {
8119 sf_srd_t *srdp = hblktosrd(hmeblkp);
8120 uint_t rid = hmeblkp->hblk_tag.htag_rid;
8126 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp,
8186 struct hme_blk *hmeblkp;
8224 hmeblkp = sfmmu_hmetohblk(sfhme);
8883 struct hme_blk *hmeblkp;
8886 hmeblkp = (struct hme_blk *)buf;
8887 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp);
8890 mutex_init(&hmeblkp->hblk_audit_lock, NULL, MUTEX_DEFAULT, NULL);
8903 struct hme_blk *hmeblkp;
8905 hmeblkp = (struct hme_blk *)buf;
8906 mutex_destroy(&hmeblkp->hblk_audit_lock);
8925 struct hme_blk *hmeblkp, *nx_hblk, *pr_hblk = NULL;
8971 hmeblkp = hmebp->hmeblkp;
8973 while (hmeblkp) {
8974 nx_hblk = hmeblkp->hblk_next;
8975 if (!hmeblkp->hblk_vcnt &&
8976 !hmeblkp->hblk_hmecnt) {
8977 sfmmu_hblk_hash_rm(hmebp, hmeblkp,
8980 pr_hblk = hmeblkp;
8982 hmeblkp = nx_hblk;
8997 hmeblkp = hmebp->hmeblkp;
8999 while (hmeblkp) {
9000 nx_hblk = hmeblkp->hblk_next;
9001 if (!hmeblkp->hblk_vcnt &&
9002 !hmeblkp->hblk_hmecnt) {
9003 sfmmu_hblk_hash_rm(hmebp, hmeblkp,
9006 pr_hblk = hmeblkp;
9008 hmeblkp = nx_hblk;
9078 struct hme_blk *hmeblkp;
9150 hmeblkp = sfmmu_hmetohblk(sfhmep);
9151 tmphat = hblktosfmmu(hmeblkp);
9154 if (hmeblkp->hblk_shared || tmphat == hat ||
9155 hmeblkp->hblk_lckcnt) {
9176 hmeblkp = sfmmu_hmetohblk(sfhmep);
9177 ASSERT(!hmeblkp->hblk_shared);
9286 struct hme_blk *hmeblkp;
9323 hmeblkp = sfmmu_hmetohblk(sfhme);
9328 vaddr = tte_to_vaddr(hmeblkp, tte);
9455 struct hme_blk *hmeblkp;
9469 hmeblkp = sfmmu_hmetohblk(sfhme);
9473 vaddr = tte_to_vaddr(hmeblkp, tte);
9499 sfmmup = hblktosfmmu(hmeblkp);
9504 if (hmeblkp->hblk_shared) {
9506 uint_t rid = hmeblkp->hblk_tag.htag_rid;
9512 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
9515 hmeblkp, 0);
9523 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp,
9526 sfmmu_tlbcache_demap(vaddr, sfmmup, hmeblkp,
9539 if (hmeblkp->hblk_shared) {
9541 uint_t rid = hmeblkp->hblk_tag.htag_rid;
9547 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
9550 hmeblkp, 0);
9557 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp,
9560 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 1);
10566 sfmmu_put_free_hblk(struct hme_blk *hmeblkp, uint_t critical)
10570 ASSERT(hmeblkp->hblk_hmecnt == 0);
10571 ASSERT(hmeblkp->hblk_vcnt == 0);
10572 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp));
10590 hmeblkp->hblk_next = freehblkp;
10591 freehblkp = hmeblkp;
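Note: 10566-10591 show sfmmu_put_free_hblk caching a fully empty block on a global free list with a plain push; the asserts demand the block is empty before it goes back. A sketch of the push; the real function also bounds the list and honors a 'critical' reserve, which this omits, and the list is lock-protected in the kernel:

#include <assert.h>
#include <stddef.h>

struct blk {
        struct blk *next;
        int vcnt, hmecnt;
};

static struct blk *freelist;

static void
put_free_blk(struct blk *blkp)
{
        assert(blkp->vcnt == 0 && blkp->hmecnt == 0);
        blkp->next = freelist;  /* push-front, O(1) */
        freelist = blkp;
}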
10672 hblkp = hmebp->hmeblkp; hblkp != NULL && hblkp != old;
10925 struct hme_blk *hmeblkp = NULL;
10963 hmeblkp =
10978 hmeblkp =
11053 if (!sfmmu_get_free_hblk(&hmeblkp, 1))
11066 if ((hmeblkp = kmem_cache_alloc(sfmmu_cache, sleep)) == NULL) {
11067 hmeblkp = sfmmu_hblk_steal(size);
11071 * swap hblk_reserve with hmeblkp and
11077 sfmmu_hblk_swap(hmeblkp);
11087 if (sfmmu_put_free_hblk(hmeblkp, 0))
11098 if (!sfmmu_get_free_hblk(&hmeblkp, 0)) {
11105 hmeblkp = HBLK_RESERVE;
11111 ASSERT(hmeblkp != NULL);
11112 set_hblk_sz(hmeblkp, size);
11113 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp));
11118 if (hmeblkp != HBLK_RESERVE) {
11153 sfmmu_put_free_hblk(hmeblkp, forcefree)) {
11157 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp);
11217 hmeblkp->hblk_shared = 1;
11219 hmeblkp->hblk_shared = 0;
11221 set_hblk_sz(hmeblkp, size);
11223 hmeblkp->hblk_next = (struct hme_blk *)NULL;
11224 hmeblkp->hblk_tag = hblktag;
11225 hmeblkp->hblk_shadow = shw_hblkp;
11226 hblkpa = hmeblkp->hblk_nextpa;
11227 hmeblkp->hblk_nextpa = HMEBLK_ENDPA;
11229 ASSERT(get_hblk_ttesz(hmeblkp) == size);
11230 ASSERT(get_hblk_span(hmeblkp) == HMEBLK_SPAN(size));
11231 ASSERT(hmeblkp->hblk_hmecnt == 0);
11232 ASSERT(hmeblkp->hblk_vcnt == 0);
11233 ASSERT(hmeblkp->hblk_lckcnt == 0);
11234 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp));
11235 sfmmu_hblk_hash_add(hmebp, hmeblkp, hblkpa);
11236 return (hmeblkp);
11246 struct hme_blk *hmeblkp, *next_hmeblkp;
11253 hmeblkp = *listp;
11254 while (hmeblkp != NULL) {
11255 next_hmeblkp = hmeblkp->hblk_next;
11256 ASSERT(!hmeblkp->hblk_hmecnt);
11257 ASSERT(!hmeblkp->hblk_vcnt);
11258 ASSERT(!hmeblkp->hblk_lckcnt);
11259 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
11260 ASSERT(hmeblkp->hblk_shared == 0);
11261 ASSERT(hmeblkp->hblk_shw_bit == 0);
11262 ASSERT(hmeblkp->hblk_shadow == NULL);
11264 hblkpa = va_to_pa((caddr_t)hmeblkp);
11266 critical = (hblktosfmmu(hmeblkp) == KHATID) ? 1 : 0;
11268 size = get_hblk_ttesz(hmeblkp);
11269 hmeblkp->hblk_next = NULL;
11270 hmeblkp->hblk_nextpa = hblkpa;
11272 if (hmeblkp->hblk_nuc_bit == 0) {
11275 !sfmmu_put_free_hblk(hmeblkp, critical))
11276 kmem_cache_free(get_hblk_cache(hmeblkp),
11277 hmeblkp);
11279 hmeblkp = next_hmeblkp;
11301 struct hme_blk *hmeblkp = NULL, *pr_hblk;
11308 if ((hmeblkp = sfmmu_check_pending_hblks(size)) != NULL) {
11309 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp);
11310 ASSERT(hmeblkp->hblk_hmecnt == 0);
11311 ASSERT(hmeblkp->hblk_vcnt == 0);
11312 return (hmeblkp);
11318 if (sfmmu_get_free_hblk(&hmeblkp, critical))
11319 return (hmeblkp);
11326 for (i = 0; hmeblkp == NULL && i <= UHMEHASH_SZ +
11329 hmeblkp = hmebp->hmeblkp;
11332 while (hmeblkp) {
11338 if ((get_hblk_ttesz(hmeblkp) == size) &&
11339 (hmeblkp->hblk_shw_bit == 0 ||
11340 hmeblkp->hblk_vcnt == 0) &&
11341 (hmeblkp->hblk_lckcnt == 0)) {
11348 if ((hmeblkp->hblk_vcnt == 0 &&
11349 hmeblkp->hblk_hmecnt == 0) || (i >=
11352 hmeblkp, hblkpa, pr_hblk)) {
11361 pr_hblk = hmeblkp;
11362 hblkpa = hmeblkp->hblk_nextpa;
11363 hmeblkp = hmeblkp->hblk_next;
11372 if (hmeblkp != NULL)
11381 hmeblkp = hmebp->hmeblkp;
11384 while (hmeblkp) {
11388 if ((get_hblk_ttesz(hmeblkp) == size) &&
11389 (hmeblkp->hblk_lckcnt == 0) &&
11390 (hmeblkp->hblk_vcnt == 0) &&
11391 (hmeblkp->hblk_hmecnt == 0)) {
11393 hmeblkp, hblkpa, pr_hblk)) {
11404 pr_hblk = hmeblkp;
11405 hblkpa = hmeblkp->hblk_nextpa;
11406 hmeblkp = hmeblkp->hblk_next;
11414 if (hmeblkp != NULL)
11418 return (hmeblkp);
11429 sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
11443 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
11447 sfmmup = hblktosfmmu(hmeblkp);
11448 if (hmeblkp->hblk_shared || sfmmup->sfmmu_ismhat) {
11452 (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
11453 (caddr_t)get_hblk_base(hmeblkp),
11454 get_hblk_endaddr(hmeblkp), &dmr, HAT_UNLOAD);
11456 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
11466 ASSERT(hmeblkp->hblk_lckcnt == 0);
11467 ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0);
11469 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 1);
11470 hmeblkp->hblk_nextpa = hblkpa;
11472 shw_hblkp = hmeblkp->hblk_shadow;
11474 ASSERT(!hmeblkp->hblk_shared);
11476 vaddr = (caddr_t)get_hblk_base(hmeblkp);
11489 hmeblkp->hblk_shadow = NULL;
11497 hmeblkp->hblk_shw_bit = 0;
11499 if (hmeblkp->hblk_shared) {
11504 srdp = hblktosrd(hmeblkp);
11506 rid = hmeblkp->hblk_tag.htag_rid;
11511 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
11512 hmeblkp->hblk_shared = 0;
11524 struct hme_blk *hmeblkp;
11534 hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 -
11537 return (hmeblkp);
11937 struct hme_blk *hmeblkp, int uselocks)
11947 ASSERT(hmeblkp->hblk_shared);
11980 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
12055 struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag)
12068 ASSERT(!hmeblkp->hblk_shared);
12115 SFMMU_UNLOAD_TSB(va, sfmmup, hmeblkp, 1);
12151 sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
12161 ASSERT(!hmeblkp->hblk_shared);
12194 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
12240 sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
12246 ASSERT(!hmeblkp->hblk_shared);
12259 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
13162 struct hme_blk *hmeblkp;
13185 hmeblkp = (struct hme_blk *)addr;
13187 hmeblkp->hblk_nuc_bit = 1;
13188 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
13197 hmeblkp = (struct hme_blk *)addr;
13199 hmeblkp->hblk_nuc_bit = 1;
13200 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
13230 struct hme_blk *hmeblkp;
13249 hmeblkp = sfmmu_hmetohblk(sfhmep);
13393 * *hmeblkp is currently unused.
13398 chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp)
13408 hmeblkp = hmeblkp;
15446 sfmmu_hblk_hash_add(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
15451 if (hmebp->hmeblkp == NULL) {
15456 hmeblkp->hblk_nextpa = hmebp->hmeh_nextpa;
15465 hmeblkp->hblk_next = hmebp->hmeblkp;
15466 hmebp->hmeblkp = hmeblkp;
15491 * hmeblkp - address of hmeblk to be removed
15492 * pr_hblk - virtual address of previous hmeblkp
15502 sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
15514 if (hmebp->hmeblkp == hmeblkp) {
15515 hmebp->hmeh_nextpa = hmeblkp->hblk_nextpa;
15516 hmebp->hmeblkp = hmeblkp->hblk_next;
15518 pr_hblk->hblk_nextpa = hmeblkp->hblk_nextpa;
15519 pr_hblk->hblk_next = hmeblkp->hblk_next;
15522 size = get_hblk_ttesz(hmeblkp);
15523 shw_hblkp = hmeblkp->hblk_shadow;
15525 ASSERT(hblktosfmmu(hmeblkp) != KHATID);
15526 ASSERT(!hmeblkp->hblk_shared);
15536 vaddr = (caddr_t)get_hblk_base(hmeblkp);
15549 hmeblkp->hblk_shadow = NULL;
15551 hmeblkp->hblk_shw_bit = 0;
15553 if (hmeblkp->hblk_shared) {
15559 srdp = hblktosrd(hmeblkp);
15561 rid = hmeblkp->hblk_tag.htag_rid;
15566 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
15568 hmeblkp->hblk_shared = 0;
15577 hmeblkp->hblk_nextpa = HMEBLK_ENDPA;
15578 hmeblkp->hblk_next = NULL;
15580 /* Append hmeblkp to listp for processing later. */
15581 hmeblkp->hblk_next = *listp;
15582 *listp = hmeblkp;
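Note: the hash add at 15456-15466 and the removal at 15514-15519 maintain two parallel chains through the same nodes: one of virtual pointers for C code and one of physical addresses so the TLB-miss path can follow the bucket without translations. Insertion updates both at the head; removal rewrites both in either the bucket header or the predecessor, so the chains never disagree. Sketched with a hypothetical va_to_pa() helper:

#include <stdint.h>

struct blk {
        struct blk *next;       /* VA chain, walked by C code */
        uint64_t nextpa;        /* PA chain, walked by the miss handler */
};

struct bucket {
        struct blk *head;
        uint64_t headpa;
};

uint64_t va_to_pa(void *);      /* hypothetical translation helper */

static void
bucket_add(struct bucket *bp, struct blk *blkp)
{
        /* New node points at the old head on both chains... */
        blkp->next = bp->head;
        blkp->nextpa = bp->headpa;
        /* ...then becomes the head on both chains. */
        bp->head = blkp;
        bp->headpa = va_to_pa(blkp);
}

static void
bucket_rm(struct bucket *bp, struct blk *blkp, struct blk *prev)
{
        if (bp->head == blkp) {
                bp->headpa = blkp->nextpa;
                bp->head = blkp->next;
        } else {
                prev->nextpa = blkp->nextpa;
                prev->next = blkp->next;
        }
}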
15594 struct hme_blk *hmeblkp = NULL, *last_hmeblkp;
15610 for (hmeblkp = cpuhp->chp_listp; hmeblkp != NULL;
15611 hmeblkp = hmeblkp->hblk_next) {
15612 if (get_hblk_ttesz(hmeblkp) == size) {
15615 hmeblkp->hblk_next;
15618 hmeblkp->hblk_next;
15625 last_hmeblkp = hmeblkp;
15636 return (hmeblkp);
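Note: the final hits (15594-15636, apparently sfmmu_check_pending_hblks) scan a pending list for the first block of the requested TTE size and splice it out, again with a trailing last-pointer. A sketch of that scan:

#include <stddef.h>

struct blk {
        struct blk *next;
        int size;
};

/* Pull the first block of a given TTE size off a pending list. */
static struct blk *
take_pending(struct blk **listp, int size)
{
        struct blk *blkp, *last = NULL;

        for (blkp = *listp; blkp != NULL; blkp = blkp->next) {
                if (blkp->size == size) {
                        if (last == NULL)
                                *listp = blkp->next;
                        else
                                last->next = blkp->next;
                        return (blkp);
                }
                last = blkp;
        }
        return (NULL);
}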