1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License, Version 1.0 only 6 * (the "License"). You may not use this file except in compliance 7 * with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or http://www.opensolaris.org/os/licensing. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 */ 22 /* 23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */ 28 /* All Rights Reserved */ 29 30 /* 31 * Portions of this source code were derived from Berkeley 4.3 BSD 32 * under license from the Regents of the University of California. 33 */ 34 35 #pragma ident "%Z%%M% %I% %E% SMI" 36 37 /* 38 * VM - generic vnode mapping segment. 39 * 40 * The segmap driver is used only by the kernel to get faster (than seg_vn) 41 * mappings [lower routine overhead; more persistent cache] to random 42 * vnode/offsets. Note than the kernel may (and does) use seg_vn as well. 
43 */ 44 45 #include <sys/types.h> 46 #include <sys/t_lock.h> 47 #include <sys/param.h> 48 #include <sys/sysmacros.h> 49 #include <sys/buf.h> 50 #include <sys/systm.h> 51 #include <sys/vnode.h> 52 #include <sys/mman.h> 53 #include <sys/errno.h> 54 #include <sys/cred.h> 55 #include <sys/kmem.h> 56 #include <sys/vtrace.h> 57 #include <sys/cmn_err.h> 58 #include <sys/debug.h> 59 #include <sys/thread.h> 60 #include <sys/dumphdr.h> 61 #include <sys/bitmap.h> 62 #include <sys/lgrp.h> 63 64 #include <vm/seg_kmem.h> 65 #include <vm/hat.h> 66 #include <vm/as.h> 67 #include <vm/seg.h> 68 #include <vm/seg_kpm.h> 69 #include <vm/seg_map.h> 70 #include <vm/page.h> 71 #include <vm/pvn.h> 72 #include <vm/rm.h> 73 74 /* 75 * Private seg op routines. 76 */ 77 static void segmap_free(struct seg *seg); 78 faultcode_t segmap_fault(struct hat *hat, struct seg *seg, caddr_t addr, 79 size_t len, enum fault_type type, enum seg_rw rw); 80 static faultcode_t segmap_faulta(struct seg *seg, caddr_t addr); 81 static int segmap_checkprot(struct seg *seg, caddr_t addr, size_t len, 82 uint_t prot); 83 static int segmap_kluster(struct seg *seg, caddr_t addr, ssize_t); 84 static int segmap_getprot(struct seg *seg, caddr_t addr, size_t len, 85 uint_t *protv); 86 static u_offset_t segmap_getoffset(struct seg *seg, caddr_t addr); 87 static int segmap_gettype(struct seg *seg, caddr_t addr); 88 static int segmap_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp); 89 static void segmap_dump(struct seg *seg); 90 static int segmap_pagelock(struct seg *seg, caddr_t addr, size_t len, 91 struct page ***ppp, enum lock_type type, 92 enum seg_rw rw); 93 static void segmap_badop(void); 94 static int segmap_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp); 95 static lgrp_mem_policy_info_t *segmap_getpolicy(struct seg *seg, 96 caddr_t addr); 97 98 /* segkpm support */ 99 static caddr_t segmap_pagecreate_kpm(struct seg *, vnode_t *, u_offset_t, 100 struct smap *, enum seg_rw); 101 struct smap 
*get_smap_kpm(caddr_t, page_t **);

/*
 * Cast the common panic stub to the function-pointer type of a seg_ops
 * slot.  Slots filled with SEGMAP_BADOP() are operations segmap does not
 * support; invoking one panics via segmap_badop().
 */
#define	SEGMAP_BADOP(t)	(t(*)())segmap_badop

/*
 * Segment operations vector for segmap segments.  Only the operations
 * meaningful for kernel transient file mappings are implemented; the
 * rest are wired to the panic stub.
 */
static struct seg_ops segmap_ops = {
	SEGMAP_BADOP(int),	/* dup */
	SEGMAP_BADOP(int),	/* unmap */
	segmap_free,
	segmap_fault,
	segmap_faulta,
	SEGMAP_BADOP(int),	/* setprot */
	segmap_checkprot,
	segmap_kluster,
	SEGMAP_BADOP(size_t),	/* swapout */
	SEGMAP_BADOP(int),	/* sync */
	SEGMAP_BADOP(size_t),	/* incore */
	SEGMAP_BADOP(int),	/* lockop */
	segmap_getprot,
	segmap_getoffset,
	segmap_gettype,
	segmap_getvp,
	SEGMAP_BADOP(int),	/* advise */
	segmap_dump,
	segmap_pagelock,	/* pagelock */
	SEGMAP_BADOP(int),	/* setpgsz */
	segmap_getmemid,	/* getmemid */
	segmap_getpolicy,	/* getpolicy */
};

/*
 * Private segmap routines.
 */
static void	segmap_unlock(struct hat *hat, struct seg *seg, caddr_t addr,
			size_t len, enum seg_rw rw, struct smap *smp);
static void	segmap_smapadd(struct smap *smp);
static struct smap *segmap_hashin(struct smap *smp, struct vnode *vp,
			u_offset_t off, int hashid);
static void	segmap_hashout(struct smap *smp);


/*
 * Statistics for segmap operations.
 *
 * No explicit locking to protect these stats.
 */
struct segmapcnt segmapcnt = {
	{ "fault",		KSTAT_DATA_ULONG },
	{ "faulta",		KSTAT_DATA_ULONG },
	{ "getmap",		KSTAT_DATA_ULONG },
	{ "get_use",		KSTAT_DATA_ULONG },
	{ "get_reclaim",	KSTAT_DATA_ULONG },
	{ "get_reuse",		KSTAT_DATA_ULONG },
	{ "get_unused",		KSTAT_DATA_ULONG },
	{ "get_nofree",		KSTAT_DATA_ULONG },
	{ "rel_async",		KSTAT_DATA_ULONG },
	{ "rel_write",		KSTAT_DATA_ULONG },
	{ "rel_free",		KSTAT_DATA_ULONG },
	{ "rel_abort",		KSTAT_DATA_ULONG },
	{ "rel_dontneed",	KSTAT_DATA_ULONG },
	{ "release",		KSTAT_DATA_ULONG },
	{ "pagecreate",		KSTAT_DATA_ULONG },
	{ "free_notfree",	KSTAT_DATA_ULONG },
	{ "free_dirty",		KSTAT_DATA_ULONG },
	{ "free",		KSTAT_DATA_ULONG },
	{ "stolen",		KSTAT_DATA_ULONG },
	{ "get_nomtx",		KSTAT_DATA_ULONG }
};

/* Exported so the kstat framework can publish the counters above. */
kstat_named_t *segmapcnt_ptr = (kstat_named_t *)&segmapcnt;
uint_t segmapcnt_ndata = sizeof (segmapcnt) / sizeof (kstat_named_t);

/*
 * Return number of map pages in segment (one smap slot per MAXBSIZE chunk).
 */
#define	MAP_PAGES(seg)	((seg)->s_size >> MAXBSHIFT)

/*
 * Translate addr into smap number within segment.
 */
#define	MAP_PAGE(seg, addr)  (((addr) - (seg)->s_base) >> MAXBSHIFT)

/*
 * Translate addr in seg into struct smap pointer.
 */
#define	GET_SMAP(seg, addr)	\
	&(((struct segmap_data *)((seg)->s_data))->smd_sm[MAP_PAGE(seg, addr)])

/*
 * Bit in map (16 bit bitmap) — one bit per PAGESIZE page within a
 * MAXBSIZE smap slot.
 */
#define	SMAP_BIT_MASK(bitindex)	(1 << ((bitindex) & 0xf))

/* Global segmap state, set up once in segmap_create(). */
static int	smd_colormsk = 0;	/* virtual-color mask (ncolor - 1) */
static int	smd_ncolor = 0;		/* number of virtual colors */
static int	smd_nfree = 0;		/* number of freelists */
static int	smd_freemsk = 0;	/* freelist index mask (nfree - 1) */
#ifdef DEBUG
static int	*colors_used;		/* per-freelist allocation counts */
#endif
static struct smap	*smd_smap;	/* array of smap slots */
static struct smaphash	*smd_hash;	/* (vp, off) hash chain headers */
#ifdef SEGMAP_HASHSTATS
static unsigned int	*smd_hash_len;	/* per-chain length statistics */
#endif
static struct smfree	*smd_free;	/* freelist headers */
static ulong_t		smd_hashmsk = 0; /* hash index mask (hashsz - 1) */

#define	SEGMAP_MAXCOLOR		2
#define	SEGMAP_CACHE_PAD	64

/*
 * Per-cpu hot state, padded to SEGMAP_CACHE_PAD bytes to keep each cpu's
 * counters and rotor indices on their own cache blocks.
 */
union segmap_cpu {
	struct {
		uint32_t	scpu_free_ndx[SEGMAP_MAXCOLOR];
		struct smap	*scpu_last_smap;
		ulong_t		scpu_getmap;
		ulong_t		scpu_release;
		ulong_t		scpu_get_reclaim;
		ulong_t		scpu_fault;
		ulong_t		scpu_pagecreate;
		ulong_t		scpu_get_reuse;
	} scpu;
	char	scpu_pad[SEGMAP_CACHE_PAD];
};
static union segmap_cpu *smd_cpu;

/*
 * There are three locks in seg_map:
 *	- per freelist mutexes
 *	- per hashchain mutexes
 *	- per smap mutexes
 *
 * The lock ordering is to get the smap mutex to lock down the slot
 * first then the hash lock (for hash in/out (vp, off) list) or the
 * freelist lock to put the slot back on the free list.
 *
 * The hash search is done by only holding the hashchain lock, when a wanted
 * slot is found, we drop the hashchain lock then lock the slot so there
 * is no overlapping of hashchain and smap locks. After the slot is
 * locked, we verify again if the slot is still what we are looking
 * for.
 *
 * Allocation of a free slot is done by holding the freelist lock,
 * then locking the smap slot at the head of the freelist. This is
 * in reversed lock order so mutex_tryenter() is used.
 *
 * The smap lock protects all fields in smap structure except for
 * the link fields for hash/free lists which are protected by
 * hashchain and freelist locks.
 */

#define	SHASHMTX(hashid)	(&smd_hash[hashid].sh_mtx)

/* Map an smap slot to its freelist header / freelist index. */
#define	SMP2SMF(smp)		(&smd_free[(smp - smd_smap) & smd_freemsk])
#define	SMP2SMF_NDX(smp)	(ushort_t)((smp - smd_smap) & smd_freemsk)

#define	SMAPMTX(smp)	(&smp->sm_mtx)

/* Hash (vp, off) into a chain index; statement macro assigns hashid. */
#define	SMAP_HASHFUNC(vp, off, hashid) \
	{ \
	hashid = ((((uintptr_t)(vp) >> 6) + ((uintptr_t)(vp) >> 3) + \
	    ((off) >> MAXBSHIFT)) & smd_hashmsk); \
	}

/*
 * The most frequently updated kstat counters are kept in the
 * per cpu array to avoid hot cache blocks. The update function
 * sums the cpu local counters to update the global counters.
 */

/* ARGSUSED */
int
segmap_kstat_update(kstat_t *ksp, int rw)
{
	int i;
	ulong_t	getmap, release, get_reclaim;
	ulong_t	fault, pagecreate, get_reuse;

	/* kstat snapshot only; writing these counters is not supported. */
	if (rw == KSTAT_WRITE)
		return (EACCES);
	getmap = release = get_reclaim = (ulong_t)0;
	fault = pagecreate = get_reuse = (ulong_t)0;
	/* Sum the per-cpu counters into the global kstat values. */
	for (i = 0; i < max_ncpus; i++) {
		getmap += smd_cpu[i].scpu.scpu_getmap;
		release += smd_cpu[i].scpu.scpu_release;
		get_reclaim += smd_cpu[i].scpu.scpu_get_reclaim;
		fault += smd_cpu[i].scpu.scpu_fault;
		pagecreate += smd_cpu[i].scpu.scpu_pagecreate;
		get_reuse += smd_cpu[i].scpu.scpu_get_reuse;
	}
	segmapcnt.smp_getmap.value.ul = getmap;
	segmapcnt.smp_release.value.ul = release;
	segmapcnt.smp_get_reclaim.value.ul = get_reclaim;
	segmapcnt.smp_fault.value.ul = fault;
	segmapcnt.smp_pagecreate.value.ul = pagecreate;
	segmapcnt.smp_get_reuse.value.ul = get_reuse;
	return (0);
}

/*
 * Create the segmap segment: size the freelists, hash table, smap slot
 * array and per-cpu state from the segment size and the creation args
 * (struct segmap_crargs) passed in argsp.  Called once at boot with the
 * address space write-locked.  Returns 0 on success; panics on a
 * misaligned segment.
 */
int
segmap_create(struct seg *seg, void *argsp)
{
	struct segmap_data *smd;
	struct smap *smp;
	struct smfree *sm;
	struct segmap_crargs *a = (struct segmap_crargs *)argsp;
	struct smaphash *shashp;
	union segmap_cpu *scpu;
	long i, npages;
	size_t hashsz;
	uint_t nfreelist;
	extern void prefetch_smap_w(void *);
	extern int max_ncpus;

	ASSERT(seg->s_as && RW_WRITE_HELD(&seg->s_as->a_lock));

	/* Both base and size must be MAXBSIZE aligned. */
	if (((uintptr_t)seg->s_base | seg->s_size) & MAXBOFFSET) {
		panic("segkmap not MAXBSIZE aligned");
		/*NOTREACHED*/
	}

	smd = kmem_zalloc(sizeof (struct segmap_data), KM_SLEEP);

	seg->s_data = (void *)smd;
	seg->s_ops = &segmap_ops;
	smd->smd_prot = a->prot;

	/*
	 * Scale the number of smap freelists to be
	 * proportional to max_ncpus * number of virtual colors.
	 * The caller can over-ride this scaling by providing
	 * a non-zero a->nfreelist argument.
	 *
	 * NOTE(review): nfreelist is uint_t, so the "nfreelist < 0"
	 * test below can never be true — the range check effectively
	 * only enforces the upper bound.  Harmless (dead test) but
	 * worth confirming against the intended contract.
	 */
	nfreelist = a->nfreelist;
	if (nfreelist == 0)
		nfreelist = max_ncpus;
	else if (nfreelist < 0 || nfreelist > 4 * max_ncpus) {
		cmn_err(CE_WARN, "segmap_create: nfreelist out of range "
		"%d, using %d", nfreelist, max_ncpus);
		nfreelist = max_ncpus;
	}
	if (nfreelist & (nfreelist - 1)) {
		/* round up nfreelist to the next power of two. */
		nfreelist = 1 << (highbit(nfreelist));
	}

	/*
	 * Get the number of virtual colors - must be a power of 2.
	 */
	if (a->shmsize)
		smd_ncolor = a->shmsize >> MAXBSHIFT;
	else
		smd_ncolor = 1;
	ASSERT((smd_ncolor & (smd_ncolor - 1)) == 0);
	ASSERT(smd_ncolor <= SEGMAP_MAXCOLOR);
	smd_colormsk = smd_ncolor - 1;
	smd->smd_nfree = smd_nfree = smd_ncolor * nfreelist;
	smd_freemsk = smd_nfree - 1;

	/*
	 * Allocate and initialize the freelist headers.
	 * Note that sm_freeq[1] starts out as the release queue. This
	 * is known when the smap structures are initialized below.
	 */
	smd_free = smd->smd_free =
	    kmem_zalloc(smd_nfree * sizeof (struct smfree), KM_SLEEP);
	for (i = 0; i < smd_nfree; i++) {
		sm = &smd->smd_free[i];
		mutex_init(&sm->sm_freeq[0].smq_mtx, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&sm->sm_freeq[1].smq_mtx, NULL, MUTEX_DEFAULT, NULL);
		sm->sm_allocq = &sm->sm_freeq[0];
		sm->sm_releq = &sm->sm_freeq[1];
	}

	/*
	 * Allocate and initialize the smap hash chain headers.
	 * Compute hash size rounding down to the next power of two.
	 */
	npages = MAP_PAGES(seg);
	smd->smd_npages = npages;
	hashsz = npages / SMAP_HASHAVELEN;
	hashsz = 1 << (highbit(hashsz)-1);
	smd_hashmsk = hashsz - 1;
	smd_hash = smd->smd_hash =
	    kmem_alloc(hashsz * sizeof (struct smaphash), KM_SLEEP);
#ifdef SEGMAP_HASHSTATS
	smd_hash_len =
	    kmem_zalloc(hashsz * sizeof (unsigned int), KM_SLEEP);
#endif
	for (i = 0, shashp = smd_hash; i < hashsz; i++, shashp++) {
		shashp->sh_hash_list = NULL;
		mutex_init(&shashp->sh_mtx, NULL, MUTEX_DEFAULT, NULL);
	}

	/*
	 * Allocate and initialize the smap structures.
	 * Link all slots onto the appropriate freelist.
	 * The smap array is large enough to affect boot time
	 * on large systems, so use memory prefetching and only
	 * go through the array 1 time. Inline an optimized version
	 * of segmap_smapadd to add structures to freelists with
	 * knowledge that no locks are needed here.
	 */
	smd_smap = smd->smd_sm =
	    kmem_alloc(sizeof (struct smap) * npages, KM_SLEEP);

	/* Walk backwards so prefetch_smap_w() leads the initialization. */
	for (smp = &smd->smd_sm[MAP_PAGES(seg) - 1];
	    smp >= smd->smd_sm; smp--) {
		struct smap *smpfreelist;
		struct sm_freeq *releq;

		prefetch_smap_w((char *)smp);

		smp->sm_vp = NULL;
		smp->sm_hash = NULL;
		smp->sm_off = 0;
		smp->sm_bitmap = 0;
		smp->sm_refcnt = 0;
		mutex_init(&smp->sm_mtx, NULL, MUTEX_DEFAULT, NULL);
		smp->sm_free_ndx = SMP2SMF_NDX(smp);

		sm = SMP2SMF(smp);
		releq = sm->sm_releq;

		/* Lock-free insert into the (release) freelist ring. */
		smpfreelist = releq->smq_free;
		if (smpfreelist == 0) {
			releq->smq_free = smp->sm_next = smp->sm_prev = smp;
		} else {
			smp->sm_next = smpfreelist;
			smp->sm_prev = smpfreelist->sm_prev;
			smpfreelist->sm_prev = smp;
			smp->sm_prev->sm_next = smp;
			releq->smq_free = smp->sm_next;
		}

		/*
		 * sm_flag = 0 (no SM_QNDX_ZERO) implies smap on sm_freeq[1]
		 */
		smp->sm_flags = 0;

#ifdef	SEGKPM_SUPPORT
		/*
		 * Due to the fragile prefetch loop no
		 * separate function is used here.
		 */
		smp->sm_kpme_next = NULL;
		smp->sm_kpme_prev = NULL;
		smp->sm_kpme_page = NULL;
#endif
	}

	/*
	 * Allocate the per color indices that distribute allocation
	 * requests over the free lists. Each cpu will have a private
	 * rotor index to spread the allocations even across the available
	 * smap freelists. Init the scpu_last_smap field to the first
	 * smap element so there is no need to check for NULL.
	 */
	smd_cpu =
	    kmem_zalloc(sizeof (union segmap_cpu) * max_ncpus, KM_SLEEP);
	for (i = 0, scpu = smd_cpu; i < max_ncpus; i++, scpu++) {
		int j;
		for (j = 0; j < smd_ncolor; j++)
			scpu->scpu.scpu_free_ndx[j] = j;
		scpu->scpu.scpu_last_smap = smd_smap;
	}

#ifdef DEBUG
	/*
	 * Keep track of which colors are used more often.
	 */
	colors_used = kmem_zalloc(smd_nfree * sizeof (int), KM_SLEEP);
#endif /* DEBUG */

	return (0);
}

/*
 * Segment free entry point.  The segmap segment lives for the life of
 * the system, so nothing is torn down here; only the lock assertion
 * remains.
 */
static void
segmap_free(seg)
	struct seg *seg;
{
	ASSERT(seg->s_as && RW_WRITE_HELD(&seg->s_as->a_lock));
}

/*
 * Do a F_SOFTUNLOCK call over the range requested.
 * The range must have already been F_SOFTLOCK'ed.
 */
static void
segmap_unlock(
	struct hat *hat,
	struct seg *seg,
	caddr_t addr,
	size_t len,
	enum seg_rw rw,
	struct smap *smp)
{
	page_t *pp;
	caddr_t adr;
	u_offset_t off;
	struct vnode *vp;
	kmutex_t *smtx;

	ASSERT(smp->sm_refcnt > 0);

#ifdef lint
	seg = seg;
#endif

	if (segmap_kpm && IS_KPM_ADDR(addr)) {

		/*
		 * We're called only from segmap_fault and this was a
		 * NOP in case of a kpm based smap, so dangerous things
		 * must have happened in the meantime. Pages are prefaulted
		 * and locked in segmap_getmapflt and they will not be
		 * unlocked until segmap_release.
		 */
		panic("segmap_unlock: called with kpm addr %p", (void *)addr);
		/*NOTREACHED*/
	}

	vp = smp->sm_vp;
	off = smp->sm_off + (u_offset_t)((uintptr_t)addr & MAXBOFFSET);

	hat_unlock(hat, addr, P2ROUNDUP(len, PAGESIZE));
	for (adr = addr; adr < addr + len; adr += PAGESIZE, off += PAGESIZE) {
		ushort_t bitmask;

		/*
		 * Use page_find() instead of page_lookup() to
		 * find the page since we know that it has
		 * "shared" lock.
		 */
		pp = page_find(vp, off);
		if (pp == NULL) {
			panic("segmap_unlock: page not found");
			/*NOTREACHED*/
		}

		if (rw == S_WRITE) {
			hat_setrefmod(pp);
		} else if (rw != S_OTHER) {
			TRACE_3(TR_FAC_VM, TR_SEGMAP_FAULT,
			"segmap_fault:pp %p vp %p offset %llx",
			pp, vp, off);
			hat_setref(pp);
		}

		/*
		 * Clear bitmap, if the bit corresponding to "off" is set,
		 * since the page and translation are being unlocked.
		 */
		bitmask = SMAP_BIT_MASK((off - smp->sm_off) >> PAGESHIFT);

		/*
		 * Large Files: the page-index computation above assumes
		 * the offset delta within an smap slot fits in an int;
		 * assert that so a large-file offset can't silently
		 * produce a bad bit index.
		 */
		ASSERT((u_offset_t)(off - smp->sm_off) <= INT_MAX);
		smtx = SMAPMTX(smp);
		mutex_enter(smtx);
		if (smp->sm_bitmap & bitmask) {
			smp->sm_bitmap &= ~bitmask;
		}
		mutex_exit(smtx);

		page_unlock(pp);
	}
}

#define	MAXPPB	(MAXBSIZE/4096)	/* assumes minimum page size of 4k */

/*
 * This routine is called via a machine specific fault handling
 * routine.  It is also called by software routines wishing to
 * lock or unlock a range of addresses.
 *
 * Note that this routine expects a page-aligned "addr".
 */
faultcode_t
segmap_fault(
	struct hat *hat,
	struct seg *seg,
	caddr_t addr,
	size_t len,
	enum fault_type type,
	enum seg_rw rw)
{
	struct segmap_data *smd = (struct segmap_data *)seg->s_data;
	struct smap *smp;
	page_t *pp, **ppp;
	struct vnode *vp;
	u_offset_t off;
	page_t *pl[MAXPPB + 1];
	uint_t prot;
	u_offset_t addroff;
	caddr_t adr;
	int err;
	u_offset_t sm_off;
	int hat_flag;

	if (segmap_kpm && IS_KPM_ADDR(addr)) {
		int newpage;
		kmutex_t *smtx;

		/*
		 * Pages are successfully prefaulted and locked in
		 * segmap_getmapflt and can't be unlocked until
		 * segmap_release. No hat mappings have to be locked
		 * and they also can't be unlocked as long as the
		 * caller owns an active kpm addr.
		 */
#ifndef DEBUG
		if (type != F_SOFTUNLOCK)
			return (0);
#endif

		if ((smp = get_smap_kpm(addr, NULL)) == NULL) {
			panic("segmap_fault: smap not found "
			    "for addr %p", (void *)addr);
			/*NOTREACHED*/
		}

		smtx = SMAPMTX(smp);
#ifdef	DEBUG
		newpage = smp->sm_flags & SM_KPM_NEWPAGE;
		if (newpage) {
			cmn_err(CE_WARN, "segmap_fault: newpage? smp %p",
			    (void *)smp);
		}

		if (type != F_SOFTUNLOCK) {
			mutex_exit(smtx);
			return (0);
		}
#endif
		mutex_exit(smtx);
		vp = smp->sm_vp;
		sm_off = smp->sm_off;

		if (vp == NULL)
			return (FC_MAKE_ERR(EIO));

		ASSERT(smp->sm_refcnt > 0);

		addroff = (u_offset_t)((uintptr_t)addr & MAXBOFFSET);
		if (addroff + len > MAXBSIZE)
			panic("segmap_fault: endaddr %p exceeds MAXBSIZE chunk",
			    (void *)(addr + len));

		off = sm_off + addroff;

		pp = page_find(vp, off);

		if (pp == NULL)
			panic("segmap_fault: softunlock page not found");

		/*
		 * Set ref bit also here in case of S_OTHER to avoid the
		 * overhead of supporting other cases than F_SOFTUNLOCK
		 * with segkpm. We can do this because the underlying
		 * pages are locked anyway.
		 */
		if (rw == S_WRITE) {
			hat_setrefmod(pp);
		} else {
			TRACE_3(TR_FAC_VM, TR_SEGMAP_FAULT,
			    "segmap_fault:pp %p vp %p offset %llx",
			    pp, vp, off);
			hat_setref(pp);
		}

		return (0);
	}

	/* Non-kpm path: resolve the smap slot from the fault address. */
	smd_cpu[CPU->cpu_seqid].scpu.scpu_fault++;
	smp = GET_SMAP(seg, addr);
	vp = smp->sm_vp;
	sm_off = smp->sm_off;

	if (vp == NULL)
		return (FC_MAKE_ERR(EIO));

	ASSERT(smp->sm_refcnt > 0);

	addroff = (u_offset_t)((uintptr_t)addr & MAXBOFFSET);
	if (addroff + len > MAXBSIZE) {
		panic("segmap_fault: endaddr %p "
		    "exceeds MAXBSIZE chunk", (void *)(addr + len));
		/*NOTREACHED*/
	}
	off = sm_off + addroff;

	/*
	 * First handle the easy stuff
	 */
	if (type == F_SOFTUNLOCK) {
		segmap_unlock(hat, seg, addr, len, rw, smp);
		return (0);
	}

	TRACE_3(TR_FAC_VM, TR_SEGMAP_GETPAGE,
	    "segmap_getpage:seg %p addr %p vp %p", seg, addr, vp);
	err = VOP_GETPAGE(vp, (offset_t)off, len, &prot, pl, MAXBSIZE,
	    seg, addr, rw, CRED());

	if (err)
		return (FC_MAKE_ERR(err));

	prot &= smd->smd_prot;

	/*
	 * Handle all pages returned in the pl[] array.
	 * This loop is coded on the assumption that if
	 * there was no error from the VOP_GETPAGE routine,
	 * that the page list returned will contain all the
	 * needed pages for the vp from [off..off + len].
	 */
	ppp = pl;
	while ((pp = *ppp++) != NULL) {
		u_offset_t poff;
		ASSERT(pp->p_vnode == vp);
		hat_flag = HAT_LOAD;

		/*
		 * Verify that the pages returned are within the range
		 * of this segmap region.  Note that it is theoretically
		 * possible for pages outside this range to be returned,
		 * but it is not very likely.  If we cannot use the
		 * page here, just release it and go on to the next one.
		 */
		if (pp->p_offset < sm_off ||
		    pp->p_offset >= sm_off + MAXBSIZE) {
			(void) page_release(pp, 1);
			continue;
		}

		ASSERT(hat == kas.a_hat);
		poff = pp->p_offset;
		adr = addr + (poff - off);
		if (adr >= addr && adr < addr + len) {
			hat_setref(pp);
			TRACE_3(TR_FAC_VM, TR_SEGMAP_FAULT,
			    "segmap_fault:pp %p vp %p offset %llx",
			    pp, vp, poff);
			if (type == F_SOFTLOCK)
				hat_flag = HAT_LOAD_LOCK;
		}

		/*
		 * Deal with VMODSORT pages here. If we know this is a write
		 * do the setmod now and allow write protection.
		 * As long as it's modified or not S_OTHER, remove write
		 * protection. With S_OTHER it's up to the FS to deal with this.
		 */
		if (IS_VMODSORT(vp)) {
			if (rw == S_WRITE)
				hat_setmod(pp);
			else if (rw != S_OTHER && !hat_ismod(pp))
				prot &= ~PROT_WRITE;
		}

		hat_memload(hat, adr, pp, prot, hat_flag);
		if (hat_flag != HAT_LOAD_LOCK)
			page_unlock(pp);
	}
	return (0);
}

/*
 * This routine is used to start I/O on pages asynchronously.
 * The read is initiated via VOP_GETPAGE with a NULL page list, so no
 * pages are returned or locked here; failures come back as fault codes.
 */
static faultcode_t
segmap_faulta(struct seg *seg, caddr_t addr)
{
	struct smap *smp;
	struct vnode *vp;
	u_offset_t off;
	int err;

	if (segmap_kpm && IS_KPM_ADDR(addr)) {
		int	newpage;
		kmutex_t *smtx;

		/*
		 * Pages are successfully prefaulted and locked in
		 * segmap_getmapflt and can't be unlocked until
		 * segmap_release. No hat mappings have to be locked
		 * and they also can't be unlocked as long as the
		 * caller owns an active kpm addr.
		 */
#ifdef	DEBUG
		if ((smp = get_smap_kpm(addr, NULL)) == NULL) {
			panic("segmap_faulta: smap not found "
			    "for addr %p", (void *)addr);
			/*NOTREACHED*/
		}

		smtx = SMAPMTX(smp);
		newpage = smp->sm_flags & SM_KPM_NEWPAGE;
		mutex_exit(smtx);
		if (newpage)
			cmn_err(CE_WARN, "segmap_faulta: newpage? smp %p",
			    (void *)smp);
#endif
		return (0);
	}

	segmapcnt.smp_faulta.value.ul++;
	smp = GET_SMAP(seg, addr);

	ASSERT(smp->sm_refcnt > 0);

	vp = smp->sm_vp;
	off = smp->sm_off;

	if (vp == NULL) {
		cmn_err(CE_WARN, "segmap_faulta - no vp");
		return (FC_MAKE_ERR(EIO));
	}

	TRACE_3(TR_FAC_VM, TR_SEGMAP_GETPAGE,
	    "segmap_getpage:seg %p addr %p vp %p", seg, addr, vp);

	/* Async read-ahead: no page list, no protections wanted back. */
	err = VOP_GETPAGE(vp, (offset_t)(off + ((offset_t)((uintptr_t)addr
	    & MAXBOFFSET))), PAGESIZE, (uint_t *)NULL, (page_t **)NULL, 0,
	    seg, addr, S_READ, CRED());

	if (err)
		return (FC_MAKE_ERR(err));
	return (0);
}

/*
 * Check that the requested access is permitted by the segment's
 * (fixed) protections.  Returns 0 if allowed, EACCES otherwise.
 */
/*ARGSUSED*/
static int
segmap_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
	struct segmap_data *smd = (struct segmap_data *)seg->s_data;

	ASSERT(seg->s_as && RW_LOCK_HELD(&seg->s_as->a_lock));

	/*
	 * Need not acquire the segment lock since
	 * "smd_prot" is a read-only field.
	 */
	return (((smd->smd_prot & prot) != prot) ?
EACCES : 0);
}

/*
 * Fill protv[] with the segment protection for every page in
 * [addr, addr + len]; segmap uses a single protection for all pages.
 */
static int
segmap_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
{
	struct segmap_data *smd = (struct segmap_data *)seg->s_data;
	size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	if (pgno != 0) {
		do
			protv[--pgno] = smd->smd_prot;
		while (pgno != 0);
	}
	return (0);
}

/*
 * Return the file offset backing addr, computed from the first smap
 * slot's offset plus the address delta from the segment base.
 */
static u_offset_t
segmap_getoffset(struct seg *seg, caddr_t addr)
{
	struct segmap_data *smd = (struct segmap_data *)seg->s_data;

	ASSERT(seg->s_as && RW_READ_HELD(&seg->s_as->a_lock));

	return ((u_offset_t)smd->smd_sm->sm_off + (addr - seg->s_base));
}

/* Segmap mappings are always MAP_SHARED. */
/*ARGSUSED*/
static int
segmap_gettype(struct seg *seg, caddr_t addr)
{
	ASSERT(seg->s_as && RW_READ_HELD(&seg->s_as->a_lock));

	return (MAP_SHARED);
}

/*ARGSUSED*/
static int
segmap_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
{
	struct segmap_data *smd = (struct segmap_data *)seg->s_data;

	ASSERT(seg->s_as && RW_READ_HELD(&seg->s_as->a_lock));

	/* XXX - This doesn't make any sense */
	*vpp = smd->smd_sm->sm_vp;
	return (0);
}

/*
 * Check to see if it makes sense to do kluster/read ahead to
 * addr + delta relative to the mapping at addr.  We assume here
 * that delta is a signed PAGESIZE'd multiple (which can be negative).
 *
 * For segmap we always "approve" of this action from our standpoint.
 */
/*ARGSUSED*/
static int
segmap_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	return (0);
}

/* Panic stub for the unsupported seg_ops slots (see SEGMAP_BADOP). */
static void
segmap_badop()
{
	panic("segmap_badop");
	/*NOTREACHED*/
}

/*
 * Special private segmap operations
 */

/*
 * Add smap to the appropriate free list.
 * Caller holds the smap mutex; the slot must have no remaining
 * references (panics otherwise).
 */
static void
segmap_smapadd(struct smap *smp)
{
	struct smfree *sm;
	struct smap *smpfreelist;
	struct sm_freeq *releq;

	ASSERT(MUTEX_HELD(SMAPMTX(smp)));

	if (smp->sm_refcnt != 0) {
		panic("segmap_smapadd");
		/*NOTREACHED*/
	}

	sm = &smd_free[smp->sm_free_ndx];
	/*
	 * Add to the tail of the release queue
	 * Note that sm_releq and sm_allocq could toggle
	 * before we get the lock. This does not affect
	 * correctness as the 2 queues are only maintained
	 * to reduce lock pressure.
	 */
	releq = sm->sm_releq;
	if (releq == &sm->sm_freeq[0])
		smp->sm_flags |= SM_QNDX_ZERO;
	else
		smp->sm_flags &= ~SM_QNDX_ZERO;
	mutex_enter(&releq->smq_mtx);
	smpfreelist = releq->smq_free;
	if (smpfreelist == 0) {
		int want;

		releq->smq_free = smp->sm_next = smp->sm_prev = smp;
		/*
		 * Both queue mutexes held to set sm_want;
		 * snapshot the value before dropping releq mutex.
		 * If sm_want appears after the releq mutex is dropped,
		 * then the smap just freed is already gone.
		 */
		want = sm->sm_want;
		mutex_exit(&releq->smq_mtx);
		/*
		 * See if there was a waiter before dropping the releq mutex
		 * then recheck after obtaining sm_freeq[0] mutex as
		 * another thread may have already signaled.
		 */
		if (want) {
			mutex_enter(&sm->sm_freeq[0].smq_mtx);
			if (sm->sm_want)
				cv_signal(&sm->sm_free_cv);
			mutex_exit(&sm->sm_freeq[0].smq_mtx);
		}
	} else {
		/* Insert before the current head == tail of the ring. */
		smp->sm_next = smpfreelist;
		smp->sm_prev = smpfreelist->sm_prev;
		smpfreelist->sm_prev = smp;
		smp->sm_prev->sm_next = smp;
		mutex_exit(&releq->smq_mtx);
	}
}


/*
 * Insert smp into the (vp, off) hash chain identified by hashid.
 * Caller holds the smap mutex and the slot must be off both the hash
 * and free lists.  Returns NULL if smp was inserted, or a pointer to
 * an existing smap that already carries the same (vp, off) tag, in
 * which case smp is left untagged.
 */
static struct smap *
segmap_hashin(struct smap *smp, struct vnode *vp, u_offset_t off, int hashid)
{
	struct smap **hpp;
	struct smap *tmp;
	kmutex_t *hmtx;

	ASSERT(MUTEX_HELD(SMAPMTX(smp)));
	ASSERT(smp->sm_vp == NULL);
	ASSERT(smp->sm_hash == NULL);
	ASSERT(smp->sm_prev == NULL);
	ASSERT(smp->sm_next == NULL);
	ASSERT(hashid >= 0 && hashid <= smd_hashmsk);

	hmtx = SHASHMTX(hashid);

	mutex_enter(hmtx);
	/*
	 * First we need to verify that no one has created a smp
	 * with (vp,off) as its tag before us.
	 */
	for (tmp = smd_hash[hashid].sh_hash_list;
	    tmp != NULL; tmp = tmp->sm_hash)
		if (tmp->sm_vp == vp && tmp->sm_off == off)
			break;

	if (tmp == NULL) {
		/*
		 * No one created one yet.
		 *
		 * Funniness here - we don't increment the ref count on the
		 * vnode even though we have another pointer to it here.
		 * The reason for this is that we don't want the fact that
		 * a seg_map entry somewhere refers to a vnode to prevent the
		 * vnode itself from going away.  This is because this
		 * reference to the vnode is a "soft one".  In the case where
		 * a mapping is being used by a rdwr [or directory routine?]
		 * there already has to be a non-zero ref count on the vnode.
		 * In the case where the vp has been freed and the smap
		 * structure is on the free list, there are no pages in memory
		 * that can refer to the vnode.  Thus even if we reuse the same
		 * vnode/smap structure for a vnode which has the same
		 * address but represents a different object, we are ok.
		 */
		smp->sm_vp = vp;
		smp->sm_off = off;

		hpp = &smd_hash[hashid].sh_hash_list;
		smp->sm_hash = *hpp;
		*hpp = smp;
#ifdef SEGMAP_HASHSTATS
		smd_hash_len[hashid]++;
#endif
	}
	mutex_exit(hmtx);

	return (tmp);
}

/*
 * Remove smp from its (vp, off) hash chain and clear its tag.
 * Caller holds the smap mutex; panics if the slot is not found on
 * its chain.
 */
static void
segmap_hashout(struct smap *smp)
{
	struct smap **hpp, *hp;
	struct vnode *vp;
	kmutex_t *mtx;
	int hashid;
	u_offset_t off;

	ASSERT(MUTEX_HELD(SMAPMTX(smp)));

	vp = smp->sm_vp;
	off = smp->sm_off;

	SMAP_HASHFUNC(vp, off, hashid);	/* macro assigns hashid */
	mtx = SHASHMTX(hashid);
	mutex_enter(mtx);

	hpp = &smd_hash[hashid].sh_hash_list;
	for (;;) {
		hp = *hpp;
		if (hp == NULL) {
			panic("segmap_hashout");
			/*NOTREACHED*/
		}
		if (hp == smp)
			break;
		hpp = &hp->sm_hash;
	}

	*hpp = smp->sm_hash;
	smp->sm_hash = NULL;
#ifdef SEGMAP_HASHSTATS
	smd_hash_len[hashid]--;
#endif
	mutex_exit(mtx);

	/* Tag cleared under the smap mutex, after leaving the chain. */
	smp->sm_vp = NULL;
	smp->sm_off = (u_offset_t)0;

}

/*
 * Attempt to free unmodified, unmapped, and non locked segmap
 * pages.
 * Walks the MAXBSIZE chunk at (vp, off) one PAGESIZE page at a time;
 * pages that are busy or cannot be exclusively locked are skipped.
 */
void
segmap_pagefree(struct vnode *vp, u_offset_t off)
{
	u_offset_t pgoff;
	page_t  *pp;

	for (pgoff = off; pgoff < off + MAXBSIZE; pgoff += PAGESIZE) {

		if ((pp = page_lookup_nowait(vp, pgoff, SE_EXCL)) == NULL)
			continue;

		/* Account for the outcome of each release attempt. */
		switch (page_release(pp, 1)) {
		case PGREL_NOTREL:
			segmapcnt.smp_free_notfree.value.ul++;
			break;
		case PGREL_MOD:
			segmapcnt.smp_free_dirty.value.ul++;
			break;
		case PGREL_CLEAN:
			segmapcnt.smp_free.value.ul++;
			break;
		}
	}
}

/*
 * Locks held on entry: smap lock
 * Locks held on exit : smap lock.
 *
 * Sever the slot's old (vp, off) identity: unhash it, tear down any
 * kpm and/or hat translations to the old object, and try to free the
 * old pages.  A slot with no vnode association is left untouched.
 */

static void
grab_smp(struct smap *smp, page_t *pp)
{
	ASSERT(MUTEX_HELD(SMAPMTX(smp)));
	ASSERT(smp->sm_refcnt == 0);

	if (smp->sm_vp != (struct vnode *)NULL) {
		struct vnode *vp = smp->sm_vp;
		u_offset_t off = smp->sm_off;
		/*
		 * Destroy old vnode association and
		 * unload any hardware translations to
		 * the old object.
		 */
		smd_cpu[CPU->cpu_seqid].scpu.scpu_get_reuse++;
		segmap_hashout(smp);

		/*
		 * This node is off freelist and hashlist,
		 * so there is no reason to drop/reacquire sm_mtx
		 * across calls to hat_unload.
		 */
		if (segmap_kpm) {
			caddr_t vaddr;
			int hat_unload_needed = 0;

			/*
			 * unload kpm mapping
			 */
			if (pp != NULL) {
				vaddr = hat_kpm_page2va(pp, 1);
				hat_kpm_mapout(pp, GET_KPME(smp), vaddr);
				page_unlock(pp);
			}

			/*
			 * Check if we have (also) the rare case of a
			 * non kpm mapping.
			 */
			if (smp->sm_flags & SM_NOTKPM_RELEASED) {
				hat_unload_needed = 1;
				smp->sm_flags &= ~SM_NOTKPM_RELEASED;
			}

			if (hat_unload_needed) {
				hat_unload(kas.a_hat, segkmap->s_base +
				    ((smp - smd_smap) * MAXBSIZE),
				    MAXBSIZE, HAT_UNLOAD);
			}

		} else {
			ASSERT(smp->sm_flags & SM_NOTKPM_RELEASED);
			smp->sm_flags &= ~SM_NOTKPM_RELEASED;
			hat_unload(kas.a_hat, segkmap->s_base +
			    ((smp - smd_smap) * MAXBSIZE),
			    MAXBSIZE, HAT_UNLOAD);
		}
		segmap_pagefree(vp, off);
	}
}

/*
 * Allocate a free smap slot starting from freelist free_ndx.
 * Note the reversed lock order on this path (freelist mutex, then
 * smap mutex via mutex_tryenter) described in the locking comment
 * above; on contention we skip to the next queue/list of the same
 * color rather than block.
 */
static struct smap *
get_free_smp(int free_ndx)
{
	struct smfree *sm;
	kmutex_t *smtx;
	struct smap *smp, *first;
	struct sm_freeq *allocq, *releq;
	struct kpme *kpme;
	page_t *pp = NULL;
	int end_ndx, page_locked = 0;

	end_ndx = free_ndx;
	sm = &smd_free[free_ndx];

retry_queue:
	allocq = sm->sm_allocq;
	mutex_enter(&allocq->smq_mtx);

	if ((smp = allocq->smq_free) == NULL) {

skip_queue:
		/*
		 * The alloc list is empty or this queue is being skipped;
		 * first see if the allocq toggled.
		 */
		if (sm->sm_allocq != allocq) {
			/* queue changed */
			mutex_exit(&allocq->smq_mtx);
			goto retry_queue;
		}
		releq = sm->sm_releq;
		if (!mutex_tryenter(&releq->smq_mtx)) {
			/* cannot get releq; a free smp may be there now */
			mutex_exit(&allocq->smq_mtx);

			/*
			 * This loop could spin forever if this thread has
			 * higher priority than the thread that is holding
			 * releq->smq_mtx. In order to force the other thread
			 * to run, we'll lock/unlock the mutex which is safe
			 * since we just unlocked the allocq mutex.
			 */
			mutex_enter(&releq->smq_mtx);
			mutex_exit(&releq->smq_mtx);
			goto retry_queue;
		}
		if (releq->smq_free == NULL) {
			/*
			 * This freelist is empty.
1232 * This should not happen unless clients 1233 * are failing to release the segmap 1234 * window after accessing the data. 1235 * Before resorting to sleeping, try 1236 * the next list of the same color. 1237 */ 1238 free_ndx = (free_ndx + smd_ncolor) & smd_freemsk; 1239 if (free_ndx != end_ndx) { 1240 mutex_exit(&releq->smq_mtx); 1241 mutex_exit(&allocq->smq_mtx); 1242 sm = &smd_free[free_ndx]; 1243 goto retry_queue; 1244 } 1245 /* 1246 * Tried all freelists of the same color once, 1247 * wait on this list and hope something gets freed. 1248 */ 1249 segmapcnt.smp_get_nofree.value.ul++; 1250 sm->sm_want++; 1251 mutex_exit(&sm->sm_freeq[1].smq_mtx); 1252 cv_wait(&sm->sm_free_cv, 1253 &sm->sm_freeq[0].smq_mtx); 1254 sm->sm_want--; 1255 mutex_exit(&sm->sm_freeq[0].smq_mtx); 1256 sm = &smd_free[free_ndx]; 1257 goto retry_queue; 1258 } else { 1259 /* 1260 * Something on the rele queue; flip the alloc 1261 * and rele queues and retry. 1262 */ 1263 sm->sm_allocq = releq; 1264 sm->sm_releq = allocq; 1265 mutex_exit(&allocq->smq_mtx); 1266 mutex_exit(&releq->smq_mtx); 1267 if (page_locked) { 1268 delay(hz >> 2); 1269 page_locked = 0; 1270 } 1271 goto retry_queue; 1272 } 1273 } else { 1274 /* 1275 * Fastpath the case we get the smap mutex 1276 * on the first try. 1277 */ 1278 first = smp; 1279 next_smap: 1280 smtx = SMAPMTX(smp); 1281 if (!mutex_tryenter(smtx)) { 1282 /* 1283 * Another thread is trying to reclaim this slot. 1284 * Skip to the next queue or smap. 
1285 */ 1286 if ((smp = smp->sm_next) == first) { 1287 goto skip_queue; 1288 } else { 1289 goto next_smap; 1290 } 1291 } else { 1292 /* 1293 * if kpme exists, get shared lock on the page 1294 */ 1295 if (segmap_kpm && smp->sm_vp != NULL) { 1296 1297 kpme = GET_KPME(smp); 1298 pp = kpme->kpe_page; 1299 1300 if (pp != NULL) { 1301 if (!page_trylock(pp, SE_SHARED)) { 1302 smp = smp->sm_next; 1303 mutex_exit(smtx); 1304 page_locked = 1; 1305 1306 pp = NULL; 1307 1308 if (smp == first) { 1309 goto skip_queue; 1310 } else { 1311 goto next_smap; 1312 } 1313 } else { 1314 if (kpme->kpe_page == NULL) { 1315 page_unlock(pp); 1316 pp = NULL; 1317 } 1318 } 1319 } 1320 } 1321 1322 /* 1323 * At this point, we've selected smp. Remove smp 1324 * from its freelist. If smp is the first one in 1325 * the freelist, update the head of the freelist. 1326 */ 1327 if (first == smp) { 1328 ASSERT(first == allocq->smq_free); 1329 allocq->smq_free = smp->sm_next; 1330 } 1331 1332 /* 1333 * if the head of the freelist still points to smp, 1334 * then there are no more free smaps in that list. 1335 */ 1336 if (allocq->smq_free == smp) 1337 /* 1338 * Took the last one 1339 */ 1340 allocq->smq_free = NULL; 1341 else { 1342 smp->sm_prev->sm_next = smp->sm_next; 1343 smp->sm_next->sm_prev = smp->sm_prev; 1344 } 1345 mutex_exit(&allocq->smq_mtx); 1346 smp->sm_prev = smp->sm_next = NULL; 1347 1348 /* 1349 * if pp != NULL, pp must have been locked; 1350 * grab_smp() unlocks pp. 1351 */ 1352 ASSERT((pp == NULL) || PAGE_LOCKED(pp)); 1353 grab_smp(smp, pp); 1354 /* return smp locked. */ 1355 ASSERT(SMAPMTX(smp) == smtx); 1356 ASSERT(MUTEX_HELD(smtx)); 1357 return (smp); 1358 } 1359 } 1360 } 1361 1362 /* 1363 * Special public segmap operations 1364 */ 1365 1366 /* 1367 * Create pages (without using VOP_GETPAGE) and load up tranlations to them. 1368 * If softlock is TRUE, then set things up so that it looks like a call 1369 * to segmap_fault with F_SOFTLOCK. 
 *
 * Returns 1, if a page is created by calling page_create_va(), or 0 otherwise.
 *
 * All fields in the generic segment (struct seg) are considered to be
 * read-only for "segmap" even though the kernel address space (kas) may
 * not be locked, hence no lock is needed to access them.
 */
int
segmap_pagecreate(struct seg *seg, caddr_t addr, size_t len, int softlock)
{
	struct segmap_data *smd = (struct segmap_data *)seg->s_data;
	page_t *pp;
	u_offset_t off;
	struct smap *smp;
	struct vnode *vp;
	caddr_t eaddr;
	int newpage = 0;
	uint_t prot;
	kmutex_t *smtx;
	int hat_flag;

	ASSERT(seg->s_as == &kas);

	if (segmap_kpm && IS_KPM_ADDR(addr)) {
		/*
		 * Pages are successfully prefaulted and locked in
		 * segmap_getmapflt and can't be unlocked until
		 * segmap_release. The SM_KPM_NEWPAGE flag is set
		 * in segmap_pagecreate_kpm when new pages are created.
		 * and it is returned as "newpage" indication here.
		 */
		if ((smp = get_smap_kpm(addr, NULL)) == NULL) {
			panic("segmap_pagecreate: smap not found "
			    "for addr %p", (void *)addr);
			/*NOTREACHED*/
		}

		smtx = SMAPMTX(smp);
		newpage = smp->sm_flags & SM_KPM_NEWPAGE;
		smp->sm_flags &= ~SM_KPM_NEWPAGE;
		mutex_exit(smtx);

		return (newpage);
	}

	smd_cpu[CPU->cpu_seqid].scpu.scpu_pagecreate++;

	eaddr = addr + len;
	addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);

	smp = GET_SMAP(seg, addr);

	/*
	 * We don't grab smp mutex here since we assume the smp
	 * has a refcnt set already which prevents the slot from
	 * changing its id.
	 */
	ASSERT(smp->sm_refcnt > 0);

	vp = smp->sm_vp;
	off = smp->sm_off + ((u_offset_t)((uintptr_t)addr & MAXBOFFSET));
	prot = smd->smd_prot;

	for (; addr < eaddr; addr += PAGESIZE, off += PAGESIZE) {
		hat_flag = HAT_LOAD;
		pp = page_lookup(vp, off, SE_SHARED);
		if (pp == NULL) {
			ushort_t bitindex;

			if ((pp = page_create_va(vp, off,
			    PAGESIZE, PG_WAIT, seg, addr)) == NULL) {
				panic("segmap_pagecreate: page_create failed");
				/*NOTREACHED*/
			}
			newpage = 1;
			page_io_unlock(pp);

			/*
			 * Since pages created here do not contain valid
			 * data until the caller writes into them, the
			 * "exclusive" lock will not be dropped to prevent
			 * other users from accessing the page. We also
			 * have to lock the translation to prevent a fault
			 * from occurring when the virtual address mapped by
			 * this page is written into. This is necessary to
			 * avoid a deadlock since we haven't dropped the
			 * "exclusive" lock.
			 */
			bitindex = (ushort_t)((off - smp->sm_off) >> PAGESHIFT);

			/*
			 * Large Files: The following assertion is to
			 * verify the cast above.
			 */
			ASSERT((u_offset_t)(off - smp->sm_off) <= INT_MAX);
			smtx = SMAPMTX(smp);
			mutex_enter(smtx);
			smp->sm_bitmap |= SMAP_BIT_MASK(bitindex);
			mutex_exit(smtx);

			hat_flag = HAT_LOAD_LOCK;
		} else if (softlock) {
			hat_flag = HAT_LOAD_LOCK;
		}

		if (IS_VMODSORT(pp->p_vnode) && (prot & PROT_WRITE))
			hat_setmod(pp);

		hat_memload(kas.a_hat, addr, pp, prot, hat_flag);

		if (hat_flag != HAT_LOAD_LOCK)
			page_unlock(pp);

		TRACE_5(TR_FAC_VM, TR_SEGMAP_PAGECREATE,
		    "segmap_pagecreate:seg %p addr %p pp %p vp %p offset %llx",
		    seg, addr, pp, vp, off);
	}

	return (newpage);
}

/*
 * Undo the effects of segmap_pagecreate for the range [addr, addr + len):
 * for each page whose bit is set in the smap bitmap, clear the bit,
 * unlock the locked translation, and drop the "exclusive" page lock
 * (setting ref/mod bits according to rw first).
 */
void
segmap_pageunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw)
{
	struct smap	*smp;
	ushort_t	bitmask;
	page_t		*pp;
	struct	vnode	*vp;
	u_offset_t	off;
	caddr_t		eaddr;
	kmutex_t	*smtx;

	ASSERT(seg->s_as == &kas);

	eaddr = addr + len;
	addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);

	if (segmap_kpm && IS_KPM_ADDR(addr)) {
		/*
		 * Pages are successfully prefaulted and locked in
		 * segmap_getmapflt and can't be unlocked until
		 * segmap_release, so no pages or hat mappings have
		 * to be unlocked at this point.
		 */
#ifdef DEBUG
		if ((smp = get_smap_kpm(addr, NULL)) == NULL) {
			panic("segmap_pageunlock: smap not found "
			    "for addr %p", (void *)addr);
			/*NOTREACHED*/
		}

		ASSERT(smp->sm_refcnt > 0);
		mutex_exit(SMAPMTX(smp));
#endif
		return;
	}

	smp = GET_SMAP(seg, addr);
	smtx = SMAPMTX(smp);

	ASSERT(smp->sm_refcnt > 0);

	vp = smp->sm_vp;
	off = smp->sm_off + ((u_offset_t)((uintptr_t)addr & MAXBOFFSET));

	for (; addr < eaddr; addr += PAGESIZE, off += PAGESIZE) {
		bitmask = SMAP_BIT_MASK((int)(off - smp->sm_off) >> PAGESHIFT);

		/*
		 * Large Files: Following assertion is to verify
		 * the correctness of the cast to (int) above.
		 */
		ASSERT((u_offset_t)(off - smp->sm_off) <= INT_MAX);

		/*
		 * If the bit corresponding to "off" is set,
		 * clear this bit in the bitmap, unlock translations,
		 * and release the "exclusive" lock on the page.
		 */
		if (smp->sm_bitmap & bitmask) {
			mutex_enter(smtx);
			smp->sm_bitmap &= ~bitmask;
			mutex_exit(smtx);

			hat_unlock(kas.a_hat, addr, PAGESIZE);

			/*
			 * Use page_find() instead of page_lookup() to
			 * find the page since we know that it has
			 * "exclusive" lock.
			 */
			pp = page_find(vp, off);
			if (pp == NULL) {
				panic("segmap_pageunlock: page not found");
				/*NOTREACHED*/
			}
			if (rw == S_WRITE) {
				hat_setrefmod(pp);
			} else if (rw != S_OTHER) {
				hat_setref(pp);
			}

			page_unlock(pp);
		}
	}
}

/*
 * Convenience wrapper: map a full MAXBSIZE window with no forced
 * fault and S_OTHER access semantics.
 */
caddr_t
segmap_getmap(struct seg *seg, struct vnode *vp, u_offset_t off)
{
	return (segmap_getmapflt(seg, vp, off, MAXBSIZE, 0, S_OTHER));
}

/*
 * This is the magic virtual address that offset 0 of an ELF
 * file gets mapped to in user space. This is used to pick
 * the vac color on the freelist.
 */
#define	ELF_OFFZERO_VA	(0x10000)
/*
 * segmap_getmap allocates a MAXBSIZE big slot to map the vnode vp
 * in the range <off, off + len). off doesn't need to be MAXBSIZE aligned.
 * The return address is always MAXBSIZE aligned.
 *
 * If forcefault is nonzero and the MMU translations haven't yet been created,
 * segmap_getmap will call segmap_fault(..., F_INVAL, rw) to create them.
 */
caddr_t
segmap_getmapflt(
	struct seg *seg,
	struct vnode *vp,
	u_offset_t off,
	size_t len,
	int forcefault,
	enum seg_rw rw)
{
	struct smap *smp, *nsmp;
	extern struct vnode *common_specvp();
	caddr_t baseaddr;			/* MAXBSIZE aligned */
	u_offset_t baseoff;
	int newslot;
	caddr_t vaddr;
	int color, hashid;
	kmutex_t *hashmtx, *smapmtx;
	struct smfree *sm;
	page_t	*pp;
	struct kpme *kpme;
	uint_t	prot;
	caddr_t base;
	page_t	*pl[MAXPPB + 1];
	int	error;
	int	is_kpm = 1;

	ASSERT(seg->s_as == &kas);
	ASSERT(seg == segkmap);

	baseoff = off & (offset_t)MAXBMASK;
	if (off + len > baseoff + MAXBSIZE) {
		panic("segmap_getmap bad len");
		/*NOTREACHED*/
	}

	/*
	 * If this is a block device we have to be sure to use the
	 * "common" block device vnode for the mapping.
	 */
	if (vp->v_type == VBLK)
		vp = common_specvp(vp);

	smd_cpu[CPU->cpu_seqid].scpu.scpu_getmap++;

	if (segmap_kpm == 0 ||
	    (forcefault == SM_PAGECREATE && rw != S_WRITE)) {
		is_kpm = 0;
	}

	SMAP_HASHFUNC(vp, off, hashid);	/* macro assigns hashid */
	hashmtx = SHASHMTX(hashid);

retry_hash:
	/* Look for an existing slot already mapping (vp, baseoff). */
	mutex_enter(hashmtx);
	for (smp = smd_hash[hashid].sh_hash_list;
	    smp != NULL; smp = smp->sm_hash)
		if (smp->sm_vp == vp && smp->sm_off == baseoff)
			break;
	mutex_exit(hashmtx);

vrfy_smp:
	if (smp != NULL) {

		ASSERT(vp->v_count != 0);

		/*
		 * Get smap lock and recheck its tag. The hash lock
		 * is dropped since the hash is based on (vp, off)
		 * and (vp, off) won't change when we have smap mtx.
		 */
		smapmtx = SMAPMTX(smp);
		mutex_enter(smapmtx);
		if (smp->sm_vp != vp || smp->sm_off != baseoff) {
			mutex_exit(smapmtx);
			goto retry_hash;
		}

		if (smp->sm_refcnt == 0) {

			smd_cpu[CPU->cpu_seqid].scpu.scpu_get_reclaim++;

			/*
			 * Could still be on the free list. However, this
			 * could also be an smp that is transitioning from
			 * the free list when we have too much contention
			 * for the smapmtx's. In this case, we have an
			 * unlocked smp that is not on the free list any
			 * longer, but still has a 0 refcnt.  The only way
			 * to be sure is to check the freelist pointers.
			 * Since we now have the smapmtx, we are guaranteed
			 * that the (vp, off) won't change, so we are safe
			 * to reclaim it.  get_free_smp() knows that this
			 * can happen, and it will check the refcnt.
			 */

			if ((smp->sm_next != NULL)) {
				struct sm_freeq *freeq;

				ASSERT(smp->sm_prev != NULL);
				sm = &smd_free[smp->sm_free_ndx];

				if (smp->sm_flags & SM_QNDX_ZERO)
					freeq = &sm->sm_freeq[0];
				else
					freeq = &sm->sm_freeq[1];

				mutex_enter(&freeq->smq_mtx);
				if (freeq->smq_free != smp) {
					/*
					 * fastpath normal case
					 */
					smp->sm_prev->sm_next = smp->sm_next;
					smp->sm_next->sm_prev = smp->sm_prev;
				} else if (smp == smp->sm_next) {
					/*
					 * Taking the last smap on freelist
					 */
					freeq->smq_free = NULL;
				} else {
					/*
					 * Reclaiming 1st smap on list
					 */
					freeq->smq_free = smp->sm_next;
					smp->sm_prev->sm_next = smp->sm_next;
					smp->sm_next->sm_prev = smp->sm_prev;
				}
				mutex_exit(&freeq->smq_mtx);
				smp->sm_prev = smp->sm_next = NULL;
			} else {
				ASSERT(smp->sm_prev == NULL);
				segmapcnt.smp_stolen.value.ul++;
			}

		} else {
			segmapcnt.smp_get_use.value.ul++;
		}
		smp->sm_refcnt++;		/* another user */

		/*
		 * We don't invoke segmap_fault via TLB miss, so we set ref
		 * and mod bits in advance. For S_OTHER  we set them in
		 * segmap_fault F_SOFTUNLOCK.
		 */
		if (is_kpm) {
			if (rw == S_WRITE) {
				smp->sm_flags |= SM_WRITE_DATA;
			} else if (rw == S_READ) {
				smp->sm_flags |= SM_READ_DATA;
			}
		}
		mutex_exit(smapmtx);

		newslot = 0;
	} else {

		uint32_t free_ndx, *free_ndxp;
		union segmap_cpu *scpu;

		/*
		 * On a PAC machine or a machine with anti-alias
		 * hardware, smd_colormsk will be zero.
		 *
		 * On a VAC machine- pick color by offset in the file
		 * so we won't get VAC conflicts on elf files.
		 * On data files, color does not matter but we
		 * don't know what kind of file it is so we always
		 * pick color by offset. This causes color
		 * corresponding to file offset zero to be used more
		 * heavily.
		 */
		color = (baseoff >> MAXBSHIFT) & smd_colormsk;
		scpu = smd_cpu+CPU->cpu_seqid;
		free_ndxp = &scpu->scpu.scpu_free_ndx[color];
		free_ndx = (*free_ndxp += smd_ncolor) & smd_freemsk;
#ifdef DEBUG
		colors_used[free_ndx]++;
#endif /* DEBUG */

		/*
		 * Get a locked smp slot from the free list.
		 */
		smp = get_free_smp(free_ndx);
		smapmtx = SMAPMTX(smp);

		ASSERT(smp->sm_vp == NULL);

		if ((nsmp = segmap_hashin(smp, vp, baseoff, hashid)) != NULL) {
			/*
			 * Failed to hashin, there exists one now.
			 * Return the smp we just allocated.
			 */
			segmap_smapadd(smp);
			mutex_exit(smapmtx);

			smp = nsmp;
			goto vrfy_smp;
		}
		smp->sm_refcnt++;		/* another user */

		/*
		 * We don't invoke segmap_fault via TLB miss, so we set ref
		 * and mod bits in advance. For S_OTHER  we set them in
		 * segmap_fault F_SOFTUNLOCK.
		 */
		if (is_kpm) {
			if (rw == S_WRITE) {
				smp->sm_flags |= SM_WRITE_DATA;
			} else if (rw == S_READ) {
				smp->sm_flags |= SM_READ_DATA;
			}
		}
		mutex_exit(smapmtx);

		newslot = 1;
	}

	if (!is_kpm)
		goto use_segmap_range;

	/*
	 * Use segkpm
	 */
	ASSERT(PAGESIZE == MAXBSIZE);

	/*
	 * remember the last smp faulted on this cpu.
	 */
	(smd_cpu+CPU->cpu_seqid)->scpu.scpu_last_smap = smp;

	if (forcefault == SM_PAGECREATE) {
		baseaddr = segmap_pagecreate_kpm(seg, vp, baseoff, smp, rw);
		return (baseaddr);
	}

	if (newslot == 0 &&
	    (pp = GET_KPME(smp)->kpe_page) != NULL) {

		/* fastpath */
		switch (rw) {
		case S_READ:
		case S_WRITE:
			if (page_trylock(pp, SE_SHARED)) {
				/*
				 * The page may have been freed or
				 * re-identified while unlocked; verify
				 * it still names (vp, baseoff).
				 */
				if (PP_ISFREE(pp) ||
				    !(pp->p_vnode == vp &&
				    pp->p_offset == baseoff)) {
					page_unlock(pp);
					pp = page_lookup(vp, baseoff,
					    SE_SHARED);
				}
			} else {
				pp = page_lookup(vp, baseoff, SE_SHARED);
			}

			if (pp == NULL) {
				ASSERT(GET_KPME(smp)->kpe_page == NULL);
				break;
			}

			if (rw == S_WRITE &&
			    hat_page_getattr(pp, P_MOD | P_REF) !=
			    (P_MOD | P_REF)) {
				page_unlock(pp);
				break;
			}

			/*
			 * We have the p_selock as reader, grab_smp
			 * can't hit us, we have bumped the smap
			 * refcnt and hat_pageunload needs the
			 * p_selock exclusive.
			 */
			kpme = GET_KPME(smp);
			if (kpme->kpe_page == pp) {
				baseaddr = hat_kpm_page2va(pp, 0);
			} else if (kpme->kpe_page == NULL) {
				baseaddr = hat_kpm_mapin(pp, kpme);
			} else {
				panic("segmap_getmapflt: stale "
				    "kpme page, kpme %p", (void *)kpme);
				/*NOTREACHED*/
			}

			/*
			 * We don't invoke segmap_fault via TLB miss,
			 * so we set ref and mod bits in advance.
			 * For S_OTHER we set them in segmap_fault
			 * F_SOFTUNLOCK.
			 */
			if (rw == S_READ && !hat_isref(pp))
				hat_setref(pp);

			return (baseaddr);
		default:
			break;
		}
	}

	base = segkpm_create_va(baseoff);
	error = VOP_GETPAGE(vp, (offset_t)baseoff, len, &prot, pl, MAXBSIZE,
	    seg, base, rw, CRED());

	pp = pl[0];
	if (error || pp == NULL) {
		/*
		 * Use segmap address slot and let segmap_fault deal
		 * with the error cases. There is no error return
		 * possible here.
		 */
		goto use_segmap_range;
	}

	ASSERT(pl[1] == NULL);

	/*
	 * When prot is not returned w/ PROT_ALL the returned pages
	 * are not backed by fs blocks. For most of the segmap users
	 * this is no problem, they don't write to the pages in the
	 * same request and therefore don't rely on a following
	 * trap driven segmap_fault. With SM_LOCKPROTO users it
	 * is more secure to use segkmap addresses to allow
	 * protection segmap_fault's.
	 */
	if (prot != PROT_ALL && forcefault == SM_LOCKPROTO) {
		/*
		 * Use segmap address slot and let segmap_fault
		 * do the error return.
		 */
		ASSERT(rw != S_WRITE);
		ASSERT(PAGE_LOCKED(pp));
		page_unlock(pp);
		forcefault = 0;
		goto use_segmap_range;
	}

	/*
	 * We have the p_selock as reader, grab_smp can't hit us, we
	 * have bumped the smap refcnt and hat_pageunload needs the
	 * p_selock exclusive.
	 */
	kpme = GET_KPME(smp);
	if (kpme->kpe_page == pp) {
		baseaddr = hat_kpm_page2va(pp, 0);
	} else if (kpme->kpe_page == NULL) {
		baseaddr = hat_kpm_mapin(pp, kpme);
	} else {
		panic("segmap_getmapflt: stale kpme page after "
		    "VOP_GETPAGE, kpme %p", (void *)kpme);
		/*NOTREACHED*/
	}

	smd_cpu[CPU->cpu_seqid].scpu.scpu_fault++;

	return (baseaddr);


use_segmap_range:
	baseaddr = seg->s_base + ((smp - smd_smap) * MAXBSIZE);
	TRACE_4(TR_FAC_VM, TR_SEGMAP_GETMAP,
	    "segmap_getmap:seg %p addr %p vp %p offset %llx",
	    seg, baseaddr, vp, baseoff);

	/*
	 * Prefault the translations
	 */
	vaddr = baseaddr + (off - baseoff);
	if (forcefault && (newslot || !hat_probe(kas.a_hat, vaddr))) {

		caddr_t pgaddr = (caddr_t)((uintptr_t)vaddr &
		    (uintptr_t)PAGEMASK);

		(void) segmap_fault(kas.a_hat, seg, pgaddr,
		    (vaddr + len - pgaddr + PAGESIZE - 1) & (uintptr_t)PAGEMASK,
		    F_INVAL, rw);
	}

	return (baseaddr);
}

/*
 * Release a mapping obtained from segmap_getmap/segmap_getmapflt.
 * flags may request writeback or invalidation of the window via
 * VOP_PUTPAGE (SM_WRITE, SM_ASYNC, SM_INVAL, SM_DESTROY, SM_FREE,
 * SM_DONTNEED).  On the last release the slot is returned to its
 * freelist.  Returns the VOP_PUTPAGE error, or 0.
 */
int
segmap_release(struct seg *seg, caddr_t addr, uint_t flags)
{
	struct smap	*smp;
	int 		error;
	int		bflags = 0;
	struct vnode	*vp;
	u_offset_t	offset;
	kmutex_t	*smtx;
	int		is_kpm = 0;
	page_t		*pp;

	if (segmap_kpm && IS_KPM_ADDR(addr)) {

		if (((uintptr_t)addr & MAXBOFFSET) != 0) {
			panic("segmap_release: addr %p not "
			    "MAXBSIZE aligned", (void *)addr);
			/*NOTREACHED*/
		}

		if ((smp = get_smap_kpm(addr, &pp)) == NULL) {
			panic("segmap_release: smap not found "
			    "for addr %p", (void *)addr);
			/*NOTREACHED*/
		}

		TRACE_3(TR_FAC_VM, TR_SEGMAP_RELMAP,
		    "segmap_relmap:seg %p addr %p smp %p",
		    seg, addr, smp);

		smtx = SMAPMTX(smp);

		/*
		 * For compatibility reasons segmap_pagecreate_kpm sets this
		 * flag to allow a following segmap_pagecreate to return
		 * this as "newpage" flag. When segmap_pagecreate is not
		 * called at all we clear it now.
		 */
		smp->sm_flags &= ~SM_KPM_NEWPAGE;
		is_kpm = 1;
		if (smp->sm_flags & SM_WRITE_DATA) {
			hat_setrefmod(pp);
		} else if (smp->sm_flags & SM_READ_DATA) {
			hat_setref(pp);
		}
	} else {
		if (addr < seg->s_base || addr >= seg->s_base + seg->s_size ||
		    ((uintptr_t)addr & MAXBOFFSET) != 0) {
			panic("segmap_release: bad addr %p", (void *)addr);
			/*NOTREACHED*/
		}
		smp = GET_SMAP(seg, addr);

		TRACE_3(TR_FAC_VM, TR_SEGMAP_RELMAP,
		    "segmap_relmap:seg %p addr %p smp %p",
		    seg, addr, smp);

		smtx = SMAPMTX(smp);
		mutex_enter(smtx);
		smp->sm_flags |= SM_NOTKPM_RELEASED;
	}

	ASSERT(smp->sm_refcnt > 0);

	/*
	 * Need to call VOP_PUTPAGE() if any flags (except SM_DONTNEED)
	 * are set.
	 */
	if ((flags & ~SM_DONTNEED) != 0) {
		if (flags & SM_WRITE)
			segmapcnt.smp_rel_write.value.ul++;
		if (flags & SM_ASYNC) {
			bflags |= B_ASYNC;
			segmapcnt.smp_rel_async.value.ul++;
		}
		if (flags & SM_INVAL) {
			bflags |= B_INVAL;
			segmapcnt.smp_rel_abort.value.ul++;
		}
		if (flags & SM_DESTROY) {
			bflags |= (B_INVAL|B_TRUNC);
			segmapcnt.smp_rel_abort.value.ul++;
		}
		if (smp->sm_refcnt == 1) {
			/*
			 * We only bother doing the FREE and DONTNEED flags
			 * if no one else is still referencing this mapping.
			 */
			if (flags & SM_FREE) {
				bflags |= B_FREE;
				segmapcnt.smp_rel_free.value.ul++;
			}
			if (flags & SM_DONTNEED) {
				bflags |= B_DONTNEED;
				segmapcnt.smp_rel_dontneed.value.ul++;
			}
		}
	} else {
		smd_cpu[CPU->cpu_seqid].scpu.scpu_release++;
	}

	vp = smp->sm_vp;
	offset = smp->sm_off;

	if (--smp->sm_refcnt == 0) {

		if (is_kpm) {
			smp->sm_flags &= ~(SM_WRITE_DATA | SM_READ_DATA);
		}
		if (flags & (SM_INVAL|SM_DESTROY)) {
			segmap_hashout(smp);	/* remove map info */
			if (is_kpm) {
				hat_kpm_mapout(pp, GET_KPME(smp), addr);
				if (smp->sm_flags & SM_NOTKPM_RELEASED) {
					smp->sm_flags &= ~SM_NOTKPM_RELEASED;
					hat_unload(kas.a_hat, addr, MAXBSIZE,
					    HAT_UNLOAD);
				}

			} else {
				if (segmap_kpm)
					segkpm_mapout_validkpme(GET_KPME(smp));

				smp->sm_flags &= ~SM_NOTKPM_RELEASED;
				hat_unload(kas.a_hat, addr, MAXBSIZE,
				    HAT_UNLOAD);
			}
		}
		segmap_smapadd(smp);	/* add to free list */
	}

	mutex_exit(smtx);

	if (is_kpm)
		page_unlock(pp);
	/*
	 * Now invoke VOP_PUTPAGE() if any flags (except SM_DONTNEED)
	 * are set.
	 */
	if ((flags & ~SM_DONTNEED) != 0) {
		error = VOP_PUTPAGE(vp, offset, MAXBSIZE,
		    bflags, CRED());
	} else {
		error = 0;
	}

	return (error);
}

/*
 * Dump the pages belonging to this segmap segment.
 */
static void
segmap_dump(struct seg *seg)
{
	struct segmap_data *smd;
	struct smap *smp, *smp_end;
	page_t *pp;
	pfn_t pfn;
	u_offset_t off;
	caddr_t addr;

	smd = (struct segmap_data *)seg->s_data;
	addr = seg->s_base;
	for (smp = smd->smd_sm, smp_end = smp + smd->smd_npages;
	    smp < smp_end; smp++) {

		if (smp->sm_refcnt) {
			for (off = 0; off < MAXBSIZE; off += PAGESIZE) {
				int we_own_it = 0;

				/*
				 * If pp == NULL, the page either does
				 * not exist or is exclusively locked.
				 * So determine if it exists before
				 * searching for it.
				 */
				if ((pp = page_lookup_nowait(smp->sm_vp,
				    smp->sm_off + off, SE_SHARED)))
					we_own_it = 1;
				else
					pp = page_exists(smp->sm_vp,
					    smp->sm_off + off);

				if (pp) {
					pfn = page_pptonum(pp);
					dump_addpage(seg->s_as,
					    addr + off, pfn);
					if (we_own_it)
						page_unlock(pp);
				}
				/* Reset the dump watchdog for each page. */
				dump_timeleft = dump_timeout;
			}
		}
		addr += MAXBSIZE;
	}
}

/*
 * segmap does not support pagelock; callers must use the fault
 * interfaces instead.
 */
/*ARGSUSED*/
static int
segmap_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}

/*
 * Return a memory id for addr: the vnode of the first smap and the
 * offset of addr within the segment.
 */
static int
segmap_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	struct segmap_data *smd = (struct segmap_data *)seg->s_data;

	memidp->val[0] = (uintptr_t)smd->smd_sm->sm_vp;
	memidp->val[1] = smd->smd_sm->sm_off + (uintptr_t)(addr - seg->s_base);
	return (0);
}

/*
 * segmap has no lgroup memory allocation policy.
 */
/*ARGSUSED*/
static lgrp_mem_policy_info_t *
segmap_getpolicy(struct seg *seg, caddr_t addr)
{
	return (NULL);
}


#ifdef SEGKPM_SUPPORT

/*
 * segkpm support routines
 */

/*
 * Look up or create the page backing (vp, off) and map it via segkpm,
 * setting SM_KPM_NEWPAGE if a page had to be created so a following
 * segmap_pagecreate can report "newpage".  Ref/mod bits are set here
 * in advance since no trap-driven segmap_fault will occur.
 */
static caddr_t
segmap_pagecreate_kpm(struct seg *seg, vnode_t *vp, u_offset_t off,
	struct smap *smp, enum seg_rw rw)
{
	caddr_t	base;
	page_t	*pp;
	int	newpage = 0;
	struct kpme	*kpme;

	ASSERT(smp->sm_refcnt > 0);

	if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL) {
		kmutex_t *smtx;

		base = segkpm_create_va(off);

		if ((pp = page_create_va(vp, off, PAGESIZE, PG_WAIT,
		    seg, base)) == NULL) {
			panic("segmap_pagecreate_kpm: "
			    "page_create failed");
			/*NOTREACHED*/
		}

		newpage = 1;
		page_io_unlock(pp);
		ASSERT((u_offset_t)(off - smp->sm_off) <= INT_MAX);

		/*
		 * Mark this here until the following segmap_pagecreate
		 * or segmap_release.
		 */
		smtx = SMAPMTX(smp);
		mutex_enter(smtx);
		smp->sm_flags |= SM_KPM_NEWPAGE;
		mutex_exit(smtx);
	}

	kpme = GET_KPME(smp);
	if (!newpage && kpme->kpe_page == pp)
		base = hat_kpm_page2va(pp, 0);
	else
		base = hat_kpm_mapin(pp, kpme);

	/*
	 * FS code may decide not to call segmap_pagecreate and we
	 * don't invoke segmap_fault via TLB miss, so we have to set
	 * ref and mod bits in advance.
	 */
	if (rw == S_WRITE) {
		hat_setrefmod(pp);
	} else {
		ASSERT(rw == S_READ);
		hat_setref(pp);
	}

	smd_cpu[CPU->cpu_seqid].scpu.scpu_pagecreate++;

	return (base);
}

/*
 * Find the smap structure corresponding to the
 * KPM addr and return it locked.
 */
struct smap *
get_smap_kpm(caddr_t addr, page_t **ppp)
{
	struct smap	*smp;
	struct vnode	*vp;
	u_offset_t	offset;
	caddr_t		baseaddr = (caddr_t)((uintptr_t)addr & MAXBMASK);
	int		hashid;
	kmutex_t	*hashmtx;
	page_t		*pp;
	union segmap_cpu *scpu;

	pp = hat_kpm_vaddr2page(baseaddr);

	ASSERT(pp && !PP_ISFREE(pp));
	ASSERT(PAGE_LOCKED(pp));
	ASSERT(((uintptr_t)pp->p_offset & MAXBOFFSET) == 0);

	vp = pp->p_vnode;
	offset = pp->p_offset;
	ASSERT(vp != NULL);

	/*
	 * Assume the last smap used on this cpu is the one needed.
	 */
	scpu = smd_cpu+CPU->cpu_seqid;
	smp = scpu->scpu.scpu_last_smap;
	mutex_enter(&smp->sm_mtx);
	if (smp->sm_vp == vp && smp->sm_off == offset) {
		ASSERT(smp->sm_refcnt > 0);
	} else {
		/*
		 * Assumption wrong, find the smap on the hash chain.
		 */
		mutex_exit(&smp->sm_mtx);
		SMAP_HASHFUNC(vp, offset, hashid); /* macro assigns hashid */
		hashmtx = SHASHMTX(hashid);

		mutex_enter(hashmtx);
		smp = smd_hash[hashid].sh_hash_list;
		for (; smp != NULL; smp = smp->sm_hash) {
			if (smp->sm_vp == vp && smp->sm_off == offset)
				break;
		}
		mutex_exit(hashmtx);
		if (smp) {
			mutex_enter(&smp->sm_mtx);
			ASSERT(smp->sm_vp == vp && smp->sm_off == offset);
		}
	}

	if (ppp)
		*ppp = smp ? pp : NULL;

	return (smp);
}

#else	/* SEGKPM_SUPPORT */

/* segkpm stubs */

/*ARGSUSED*/
static caddr_t
segmap_pagecreate_kpm(struct seg *seg, vnode_t *vp, u_offset_t off,
	struct smap *smp, enum seg_rw rw)
{
	return (NULL);
}

/*ARGSUSED*/
struct smap *
get_smap_kpm(caddr_t addr, page_t **ppp)
{
	return (NULL);
}

#endif	/* SEGKPM_SUPPORT */