1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */ 27 /* All Rights Reserved */ 28 29 /* 30 * University Copyright- Copyright (c) 1982, 1986, 1988 31 * The Regents of the University of California 32 * All Rights Reserved 33 * 34 * University Acknowledgment- Portions of this document are derived from 35 * software developed by the University of California, Berkeley, and its 36 * contributors. 37 */ 38 39 /* 40 * VM - shared or copy-on-write from a vnode/anonymous memory. 41 */ 42 43 #include <sys/types.h> 44 #include <sys/param.h> 45 #include <sys/t_lock.h> 46 #include <sys/errno.h> 47 #include <sys/systm.h> 48 #include <sys/mman.h> 49 #include <sys/debug.h> 50 #include <sys/cred.h> 51 #include <sys/vmsystm.h> 52 #include <sys/tuneable.h> 53 #include <sys/bitmap.h> 54 #include <sys/swap.h> 55 #include <sys/kmem.h> 56 #include <sys/sysmacros.h> 57 #include <sys/vtrace.h> 58 #include <sys/cmn_err.h> 59 #include <sys/callb.h> 60 #include <sys/vm.h> 61 #include <sys/dumphdr.h> 62 #include <sys/lgrp.h> 63 64 #include <vm/hat.h> 65 #include <vm/as.h> 66 #include <vm/seg.h> 67 #include <vm/seg_vn.h> 68 #include <vm/pvn.h> 69 #include <vm/anon.h> 70 #include <vm/page.h> 71 #include <vm/vpage.h> 72 #include <sys/proc.h> 73 #include <sys/task.h> 74 #include <sys/project.h> 75 #include <sys/zone.h> 76 #include <sys/shm_impl.h> 77 /* 78 * Private seg op routines. 
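 * These are wired into the segvn_ops vector below; outside callers reach
 * them through the generic seg_ops interface (e.g. the SEGOP_FAULT()
 * macro) rather than calling them directly.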
79 */ 80 static int segvn_dup(struct seg *seg, struct seg *newseg); 81 static int segvn_unmap(struct seg *seg, caddr_t addr, size_t len); 82 static void segvn_free(struct seg *seg); 83 static faultcode_t segvn_fault(struct hat *hat, struct seg *seg, 84 caddr_t addr, size_t len, enum fault_type type, 85 enum seg_rw rw); 86 static faultcode_t segvn_faulta(struct seg *seg, caddr_t addr); 87 static int segvn_setprot(struct seg *seg, caddr_t addr, 88 size_t len, uint_t prot); 89 static int segvn_checkprot(struct seg *seg, caddr_t addr, 90 size_t len, uint_t prot); 91 static int segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta); 92 static size_t segvn_swapout(struct seg *seg); 93 static int segvn_sync(struct seg *seg, caddr_t addr, size_t len, 94 int attr, uint_t flags); 95 static size_t segvn_incore(struct seg *seg, caddr_t addr, size_t len, 96 char *vec); 97 static int segvn_lockop(struct seg *seg, caddr_t addr, size_t len, 98 int attr, int op, ulong_t *lockmap, size_t pos); 99 static int segvn_getprot(struct seg *seg, caddr_t addr, size_t len, 100 uint_t *protv); 101 static u_offset_t segvn_getoffset(struct seg *seg, caddr_t addr); 102 static int segvn_gettype(struct seg *seg, caddr_t addr); 103 static int segvn_getvp(struct seg *seg, caddr_t addr, 104 struct vnode **vpp); 105 static int segvn_advise(struct seg *seg, caddr_t addr, size_t len, 106 uint_t behav); 107 static void segvn_dump(struct seg *seg); 108 static int segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, 109 struct page ***ppp, enum lock_type type, enum seg_rw rw); 110 static int segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, 111 uint_t szc); 112 static int segvn_getmemid(struct seg *seg, caddr_t addr, 113 memid_t *memidp); 114 static lgrp_mem_policy_info_t *segvn_getpolicy(struct seg *, caddr_t); 115 static int segvn_capable(struct seg *seg, segcapability_t capable); 116 117 struct seg_ops segvn_ops = { 118 segvn_dup, 119 segvn_unmap, 120 segvn_free, 121 segvn_fault, 122 segvn_faulta, 123 segvn_setprot, 124 segvn_checkprot, 125 segvn_kluster, 126 segvn_swapout, 127 segvn_sync, 128 segvn_incore, 129 segvn_lockop, 130 segvn_getprot, 131 segvn_getoffset, 132 segvn_gettype, 133 segvn_getvp, 134 segvn_advise, 135 segvn_dump, 136 segvn_pagelock, 137 segvn_setpagesize, 138 segvn_getmemid, 139 segvn_getpolicy, 140 segvn_capable, 141 }; 142 143 /* 144 * Common zfod structures, provided as a shorthand for others to use. 
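 * A typical (illustrative) use is creating a zero-fill-on-demand user
 * mapping by passing one of the argsp pointers defined below to as_map():
 *
 *	(void) as_map(as, addr, len, segvn_create, zfod_argsp);
 *
 * kzfod_argsp is the kernel-only variant (PROT_USER stripped), and the
 * two stack argsp's differ only in whether the initial protections
 * include PROT_EXEC.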
145 */ 146 static segvn_crargs_t zfod_segvn_crargs = 147 SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL); 148 static segvn_crargs_t kzfod_segvn_crargs = 149 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_USER, 150 PROT_ALL & ~PROT_USER); 151 static segvn_crargs_t stack_noexec_crargs = 152 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_EXEC, PROT_ALL); 153 154 caddr_t zfod_argsp = (caddr_t)&zfod_segvn_crargs; /* user zfod argsp */ 155 caddr_t kzfod_argsp = (caddr_t)&kzfod_segvn_crargs; /* kernel zfod argsp */ 156 caddr_t stack_exec_argsp = (caddr_t)&zfod_segvn_crargs; /* executable stack */ 157 caddr_t stack_noexec_argsp = (caddr_t)&stack_noexec_crargs; /* noexec stack */ 158 159 #define vpgtob(n) ((n) * sizeof (struct vpage)) /* For brevity */ 160 161 size_t segvn_comb_thrshld = UINT_MAX; /* patchable -- see 1196681 */ 162 163 size_t segvn_pglock_comb_thrshld = (1UL << 16); /* 64K */ 164 size_t segvn_pglock_comb_balign = (1UL << 16); /* 64K */ 165 uint_t segvn_pglock_comb_bshift; 166 size_t segvn_pglock_comb_palign; 167 168 static int segvn_concat(struct seg *, struct seg *, int); 169 static int segvn_extend_prev(struct seg *, struct seg *, 170 struct segvn_crargs *, size_t); 171 static int segvn_extend_next(struct seg *, struct seg *, 172 struct segvn_crargs *, size_t); 173 static void segvn_softunlock(struct seg *, caddr_t, size_t, enum seg_rw); 174 static void segvn_pagelist_rele(page_t **); 175 static void segvn_setvnode_mpss(vnode_t *); 176 static void segvn_relocate_pages(page_t **, page_t *); 177 static int segvn_full_szcpages(page_t **, uint_t, int *, uint_t *); 178 static int segvn_fill_vp_pages(struct segvn_data *, vnode_t *, u_offset_t, 179 uint_t, page_t **, page_t **, uint_t *, int *); 180 static faultcode_t segvn_fault_vnodepages(struct hat *, struct seg *, caddr_t, 181 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int); 182 static faultcode_t segvn_fault_anonpages(struct hat *, struct seg *, caddr_t, 183 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int); 184 static faultcode_t segvn_faultpage(struct hat *, struct seg *, caddr_t, 185 u_offset_t, struct vpage *, page_t **, uint_t, 186 enum fault_type, enum seg_rw, int); 187 static void segvn_vpage(struct seg *); 188 static size_t segvn_count_swap_by_vpages(struct seg *); 189 190 static void segvn_purge(struct seg *seg); 191 static int segvn_reclaim(void *, caddr_t, size_t, struct page **, 192 enum seg_rw, int); 193 static int shamp_reclaim(void *, caddr_t, size_t, struct page **, 194 enum seg_rw, int); 195 196 static int sameprot(struct seg *, caddr_t, size_t); 197 198 static int segvn_demote_range(struct seg *, caddr_t, size_t, int, uint_t); 199 static int segvn_clrszc(struct seg *); 200 static struct seg *segvn_split_seg(struct seg *, caddr_t); 201 static int segvn_claim_pages(struct seg *, struct vpage *, u_offset_t, 202 ulong_t, uint_t); 203 204 static void segvn_hat_rgn_unload_callback(caddr_t, caddr_t, caddr_t, 205 size_t, void *, u_offset_t); 206 207 static struct kmem_cache *segvn_cache; 208 static struct kmem_cache **segvn_szc_cache; 209 210 #ifdef VM_STATS 211 static struct segvnvmstats_str { 212 ulong_t fill_vp_pages[31]; 213 ulong_t fltvnpages[49]; 214 ulong_t fullszcpages[10]; 215 ulong_t relocatepages[3]; 216 ulong_t fltanpages[17]; 217 ulong_t pagelock[2]; 218 ulong_t demoterange[3]; 219 } segvnvmstats; 220 #endif /* VM_STATS */ 221 222 #define SDR_RANGE 1 /* demote entire range */ 223 #define SDR_END 2 /* demote non aligned ends only */ 224 225 #define CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr) { \ 226 if 
((len) != 0) { \ 227 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz); \ 228 ASSERT(lpgaddr >= (seg)->s_base); \ 229 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)((addr) + \ 230 (len)), pgsz); \ 231 ASSERT(lpgeaddr > lpgaddr); \ 232 ASSERT(lpgeaddr <= (seg)->s_base + (seg)->s_size); \ 233 } else { \ 234 lpgeaddr = lpgaddr = (addr); \ 235 } \ 236 } 237 238 /*ARGSUSED*/ 239 static int 240 segvn_cache_constructor(void *buf, void *cdrarg, int kmflags) 241 { 242 struct segvn_data *svd = buf; 243 244 rw_init(&svd->lock, NULL, RW_DEFAULT, NULL); 245 mutex_init(&svd->segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL); 246 svd->svn_trnext = svd->svn_trprev = NULL; 247 return (0); 248 } 249 250 /*ARGSUSED1*/ 251 static void 252 segvn_cache_destructor(void *buf, void *cdrarg) 253 { 254 struct segvn_data *svd = buf; 255 256 rw_destroy(&svd->lock); 257 mutex_destroy(&svd->segfree_syncmtx); 258 } 259 260 /*ARGSUSED*/ 261 static int 262 svntr_cache_constructor(void *buf, void *cdrarg, int kmflags) 263 { 264 bzero(buf, sizeof (svntr_t)); 265 return (0); 266 } 267 268 /* 269 * Patching this variable to non-zero allows the system to run with 270 * stacks marked as "not executable". It's a bit of a kludge, but is 271 * provided as a tweakable for platforms that export those ABIs 272 * (e.g. sparc V8) that have executable stacks enabled by default. 273 * There are also some restrictions for platforms that don't actually 274 * implement 'noexec' protections. 275 * 276 * Once enabled, the system is (therefore) unable to provide a fully 277 * ABI-compliant execution environment, though practically speaking, 278 * most everything works. The exceptions are generally some interpreters 279 * and debuggers that create executable code on the stack and jump 280 * into it (without explicitly mprotecting the address range to include 281 * PROT_EXEC). 282 * 283 * One important class of applications that are disabled are those 284 * that have been transformed into malicious agents using one of the 285 * numerous "buffer overflow" attacks. See 4007890. 286 */ 287 int noexec_user_stack = 0; 288 int noexec_user_stack_log = 1; 289 290 int segvn_lpg_disable = 0; 291 uint_t segvn_maxpgszc = 0; 292 293 ulong_t segvn_vmpss_clrszc_cnt; 294 ulong_t segvn_vmpss_clrszc_err; 295 ulong_t segvn_fltvnpages_clrszc_cnt; 296 ulong_t segvn_fltvnpages_clrszc_err; 297 ulong_t segvn_setpgsz_align_err; 298 ulong_t segvn_setpgsz_anon_align_err; 299 ulong_t segvn_setpgsz_getattr_err; 300 ulong_t segvn_setpgsz_eof_err; 301 ulong_t segvn_faultvnmpss_align_err1; 302 ulong_t segvn_faultvnmpss_align_err2; 303 ulong_t segvn_faultvnmpss_align_err3; 304 ulong_t segvn_faultvnmpss_align_err4; 305 ulong_t segvn_faultvnmpss_align_err5; 306 ulong_t segvn_vmpss_pageio_deadlk_err; 307 308 int segvn_use_regions = 1; 309 310 /* 311 * Segvn supports text replication optimization for NUMA platforms. Text 312 * replica's are represented by anon maps (amp). There's one amp per text file 313 * region per lgroup. A process chooses the amp for each of its text mappings 314 * based on the lgroup assignment of its main thread (t_tid = 1). All 315 * processes that want a replica on a particular lgroup for the same text file 316 * mapping share the same amp. amp's are looked up in svntr_hashtab hash table 317 * with vp,off,size,szc used as a key. Text replication segments are read only 318 * MAP_PRIVATE|MAP_TEXT segments that map vnode. Replication is achieved by 319 * forcing COW faults from vnode to amp and mapping amp pages instead of vnode 320 * pages. 
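 * For example, the first S_EXEC fault taken on a replicated text page
 * reads the page from the vnode, copies it into a page of the chosen
 * lgroup-local amp and maps that copy; other processes homed on the same
 * lgroup then share the same amp page, while processes homed elsewhere
 * get (or create) their own lgroup's amp.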
 * A replication amp is assigned to a segment when the segment takes its
 * first pagefault. To handle rehoming of the main thread to a different
 * lgroup, segvn_trasync_thread periodically rechecks whether the process
 * still maps an amp local to its main thread; if not, the async thread
 * forces the process to remap to an amp in the main thread's new home
 * lgroup. The current text replication implementation only benefits
 * workloads that do most of their work in the main thread of a process,
 * or whose threads all run in the same lgroup. Extending the benefit to
 * other types of multithreaded workloads would require further work in
 * the hat layer to allow the same virtual address in the same hat to
 * simultaneously map different physical addresses (i.e. page table
 * replication would be needed for x86).
 *
 * amp pages are used instead of vnode pages only as long as the segment
 * has a very simple life cycle: it is created via segvn_create(), handles
 * S_EXEC (S_READ) pagefaults and is then fully unmapped. If anything more
 * complicated happens, such as a protection change, a real COW fault, a
 * pagesize change, an MC_LOCK request or a partial unmap, we turn off
 * text replication by converting the segment back to a vnode-only
 * segment (unmap the segment's address range and set svd->amp to NULL).
 *
 * The original file can be changed after an amp has been inserted into
 * svntr_hashtab. Processes launched after the file has changed must not
 * use the replicas created before the change; to implement this, hash
 * entries are timestamped, and a replica can only be used if the current
 * file modification time matches the timestamp saved when the hash entry
 * was created. Timestamps alone, however, are not sufficient to detect
 * file modification through mmap(MAP_SHARED) mappings, so those are
 * handled differently: when a writable MAP_SHARED mapping is created to
 * a vnode marked as executable, we mark all existing replicas for that
 * vnode as unusable for future text mappings, and we don't create new
 * replicas for files that currently have potentially writable MAP_SHARED
 * mappings (i.e. vn_is_mapped(V_WRITE) is true).
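 *
 * Roughly, a segment's tr_state records where it is in this life cycle:
 *
 *	SEGVN_TR_OFF	text replication is not (or no longer) in use
 *	SEGVN_TR_INIT	segvn_create() found the segment to be a
 *			candidate, but no amp has been chosen yet
 *	SEGVN_TR_ON	the first fault selected or created an
 *			lgroup-local amp (segvn_textrepl())
 *
 * Any of the "more complicated" events above drops the segment back to
 * SEGVN_TR_OFF via segvn_textunrepl().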
354 */ 355 356 #define SEGVN_TEXTREPL_MAXBYTES_FACTOR (20) 357 size_t segvn_textrepl_max_bytes_factor = SEGVN_TEXTREPL_MAXBYTES_FACTOR; 358 359 static ulong_t svntr_hashtab_sz = 512; 360 static svntr_bucket_t *svntr_hashtab = NULL; 361 static struct kmem_cache *svntr_cache; 362 static svntr_stats_t *segvn_textrepl_stats; 363 static ksema_t segvn_trasync_sem; 364 365 int segvn_disable_textrepl = 1; 366 size_t textrepl_size_thresh = (size_t)-1; 367 size_t segvn_textrepl_bytes = 0; 368 size_t segvn_textrepl_max_bytes = 0; 369 clock_t segvn_update_textrepl_interval = 0; 370 int segvn_update_tr_time = 10; 371 int segvn_disable_textrepl_update = 0; 372 373 static void segvn_textrepl(struct seg *); 374 static void segvn_textunrepl(struct seg *, int); 375 static void segvn_inval_trcache(vnode_t *); 376 static void segvn_trasync_thread(void); 377 static void segvn_trupdate_wakeup(void *); 378 static void segvn_trupdate(void); 379 static void segvn_trupdate_seg(struct seg *, segvn_data_t *, svntr_t *, 380 ulong_t); 381 382 /* 383 * Initialize segvn data structures 384 */ 385 void 386 segvn_init(void) 387 { 388 uint_t maxszc; 389 uint_t szc; 390 size_t pgsz; 391 392 segvn_cache = kmem_cache_create("segvn_cache", 393 sizeof (struct segvn_data), 0, 394 segvn_cache_constructor, segvn_cache_destructor, NULL, 395 NULL, NULL, 0); 396 397 if (segvn_lpg_disable == 0) { 398 szc = maxszc = page_num_pagesizes() - 1; 399 if (szc == 0) { 400 segvn_lpg_disable = 1; 401 } 402 if (page_get_pagesize(0) != PAGESIZE) { 403 panic("segvn_init: bad szc 0"); 404 /*NOTREACHED*/ 405 } 406 while (szc != 0) { 407 pgsz = page_get_pagesize(szc); 408 if (pgsz <= PAGESIZE || !IS_P2ALIGNED(pgsz, pgsz)) { 409 panic("segvn_init: bad szc %d", szc); 410 /*NOTREACHED*/ 411 } 412 szc--; 413 } 414 if (segvn_maxpgszc == 0 || segvn_maxpgszc > maxszc) 415 segvn_maxpgszc = maxszc; 416 } 417 418 if (segvn_maxpgszc) { 419 segvn_szc_cache = (struct kmem_cache **)kmem_alloc( 420 (segvn_maxpgszc + 1) * sizeof (struct kmem_cache *), 421 KM_SLEEP); 422 } 423 424 for (szc = 1; szc <= segvn_maxpgszc; szc++) { 425 char str[32]; 426 427 (void) sprintf(str, "segvn_szc_cache%d", szc); 428 segvn_szc_cache[szc] = kmem_cache_create(str, 429 page_get_pagecnt(szc) * sizeof (page_t *), 0, 430 NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG); 431 } 432 433 434 if (segvn_use_regions && !hat_supported(HAT_SHARED_REGIONS, NULL)) 435 segvn_use_regions = 0; 436 437 /* 438 * For now shared regions and text replication segvn support 439 * are mutually exclusive. This is acceptable because 440 * currently significant benefit from text replication was 441 * only observed on AMD64 NUMA platforms (due to relatively 442 * small L2$ size) and currently we don't support shared 443 * regions on x86. 
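 * If both happen to be enabled, shared regions take precedence and text
 * replication is forced off just below.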
444 */ 445 if (segvn_use_regions && !segvn_disable_textrepl) { 446 segvn_disable_textrepl = 1; 447 } 448 449 #if defined(_LP64) 450 if (lgrp_optimizations() && textrepl_size_thresh != (size_t)-1 && 451 !segvn_disable_textrepl) { 452 ulong_t i; 453 size_t hsz = svntr_hashtab_sz * sizeof (svntr_bucket_t); 454 455 svntr_cache = kmem_cache_create("svntr_cache", 456 sizeof (svntr_t), 0, svntr_cache_constructor, NULL, 457 NULL, NULL, NULL, 0); 458 svntr_hashtab = kmem_zalloc(hsz, KM_SLEEP); 459 for (i = 0; i < svntr_hashtab_sz; i++) { 460 mutex_init(&svntr_hashtab[i].tr_lock, NULL, 461 MUTEX_DEFAULT, NULL); 462 } 463 segvn_textrepl_max_bytes = ptob(physmem) / 464 segvn_textrepl_max_bytes_factor; 465 segvn_textrepl_stats = kmem_zalloc(NCPU * 466 sizeof (svntr_stats_t), KM_SLEEP); 467 sema_init(&segvn_trasync_sem, 0, NULL, SEMA_DEFAULT, NULL); 468 (void) thread_create(NULL, 0, segvn_trasync_thread, 469 NULL, 0, &p0, TS_RUN, minclsyspri); 470 } 471 #endif 472 473 if (!ISP2(segvn_pglock_comb_balign) || 474 segvn_pglock_comb_balign < PAGESIZE) { 475 segvn_pglock_comb_balign = 1UL << 16; /* 64K */ 476 } 477 segvn_pglock_comb_bshift = highbit(segvn_pglock_comb_balign) - 1; 478 segvn_pglock_comb_palign = btop(segvn_pglock_comb_balign); 479 } 480 481 #define SEGVN_PAGEIO ((void *)0x1) 482 #define SEGVN_NOPAGEIO ((void *)0x2) 483 484 static void 485 segvn_setvnode_mpss(vnode_t *vp) 486 { 487 int err; 488 489 ASSERT(vp->v_mpssdata == NULL || 490 vp->v_mpssdata == SEGVN_PAGEIO || 491 vp->v_mpssdata == SEGVN_NOPAGEIO); 492 493 if (vp->v_mpssdata == NULL) { 494 if (vn_vmpss_usepageio(vp)) { 495 err = VOP_PAGEIO(vp, (page_t *)NULL, 496 (u_offset_t)0, 0, 0, CRED(), NULL); 497 } else { 498 err = ENOSYS; 499 } 500 /* 501 * set v_mpssdata just once per vnode life 502 * so that it never changes. 503 */ 504 mutex_enter(&vp->v_lock); 505 if (vp->v_mpssdata == NULL) { 506 if (err == EINVAL) { 507 vp->v_mpssdata = SEGVN_PAGEIO; 508 } else { 509 vp->v_mpssdata = SEGVN_NOPAGEIO; 510 } 511 } 512 mutex_exit(&vp->v_lock); 513 } 514 } 515 516 int 517 segvn_create(struct seg *seg, void *argsp) 518 { 519 struct segvn_crargs *a = (struct segvn_crargs *)argsp; 520 struct segvn_data *svd; 521 size_t swresv = 0; 522 struct cred *cred; 523 struct anon_map *amp; 524 int error = 0; 525 size_t pgsz; 526 lgrp_mem_policy_t mpolicy = LGRP_MEM_POLICY_DEFAULT; 527 int use_rgn = 0; 528 int trok = 0; 529 530 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 531 532 if (a->type != MAP_PRIVATE && a->type != MAP_SHARED) { 533 panic("segvn_create type"); 534 /*NOTREACHED*/ 535 } 536 537 /* 538 * Check arguments. If a shared anon structure is given then 539 * it is illegal to also specify a vp. 540 */ 541 if (a->amp != NULL && a->vp != NULL) { 542 panic("segvn_create anon_map"); 543 /*NOTREACHED*/ 544 } 545 546 if (a->type == MAP_PRIVATE && (a->flags & MAP_TEXT) && 547 a->vp != NULL && a->prot == (PROT_USER | PROT_READ | PROT_EXEC) && 548 segvn_use_regions) { 549 use_rgn = 1; 550 } 551 552 /* MAP_NORESERVE on a MAP_SHARED segment is meaningless. 
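 * (MAP_NORESERVE only affects the swap reservation made for private
 * pages that may be modified via copy-on-write; shared mappings have no
 * such pages, so the flag is simply cleared here.)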
*/ 553 if (a->type == MAP_SHARED) 554 a->flags &= ~MAP_NORESERVE; 555 556 if (a->szc != 0) { 557 if (segvn_lpg_disable != 0 || (a->szc == AS_MAP_NO_LPOOB) || 558 (a->amp != NULL && a->type == MAP_PRIVATE) || 559 (a->flags & MAP_NORESERVE) || seg->s_as == &kas) { 560 a->szc = 0; 561 } else { 562 if (a->szc > segvn_maxpgszc) 563 a->szc = segvn_maxpgszc; 564 pgsz = page_get_pagesize(a->szc); 565 if (!IS_P2ALIGNED(seg->s_base, pgsz) || 566 !IS_P2ALIGNED(seg->s_size, pgsz)) { 567 a->szc = 0; 568 } else if (a->vp != NULL) { 569 extern struct vnode kvp; 570 if (IS_SWAPFSVP(a->vp) || VN_ISKAS(a->vp)) { 571 /* 572 * paranoid check. 573 * hat_page_demote() is not supported 574 * on swapfs pages. 575 */ 576 a->szc = 0; 577 } else if (map_addr_vacalign_check(seg->s_base, 578 a->offset & PAGEMASK)) { 579 a->szc = 0; 580 } 581 } else if (a->amp != NULL) { 582 pgcnt_t anum = btopr(a->offset); 583 pgcnt_t pgcnt = page_get_pagecnt(a->szc); 584 if (!IS_P2ALIGNED(anum, pgcnt)) { 585 a->szc = 0; 586 } 587 } 588 } 589 } 590 591 /* 592 * If segment may need private pages, reserve them now. 593 */ 594 if (!(a->flags & MAP_NORESERVE) && ((a->vp == NULL && a->amp == NULL) || 595 (a->type == MAP_PRIVATE && (a->prot & PROT_WRITE)))) { 596 if (anon_resv_zone(seg->s_size, 597 seg->s_as->a_proc->p_zone) == 0) 598 return (EAGAIN); 599 swresv = seg->s_size; 600 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 601 seg, swresv, 1); 602 } 603 604 /* 605 * Reserve any mapping structures that may be required. 606 * 607 * Don't do it for segments that may use regions. It's currently a 608 * noop in the hat implementations anyway. 609 */ 610 if (!use_rgn) { 611 hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP); 612 } 613 614 if (a->cred) { 615 cred = a->cred; 616 crhold(cred); 617 } else { 618 crhold(cred = CRED()); 619 } 620 621 /* Inform the vnode of the new mapping */ 622 if (a->vp != NULL) { 623 error = VOP_ADDMAP(a->vp, a->offset & PAGEMASK, 624 seg->s_as, seg->s_base, seg->s_size, a->prot, 625 a->maxprot, a->type, cred, NULL); 626 if (error) { 627 if (swresv != 0) { 628 anon_unresv_zone(swresv, 629 seg->s_as->a_proc->p_zone); 630 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 631 "anon proc:%p %lu %u", seg, swresv, 0); 632 } 633 crfree(cred); 634 if (!use_rgn) { 635 hat_unload(seg->s_as->a_hat, seg->s_base, 636 seg->s_size, HAT_UNLOAD_UNMAP); 637 } 638 return (error); 639 } 640 /* 641 * svntr_hashtab will be NULL if we support shared regions. 642 */ 643 trok = ((a->flags & MAP_TEXT) && 644 (seg->s_size > textrepl_size_thresh || 645 (a->flags & _MAP_TEXTREPL)) && 646 lgrp_optimizations() && svntr_hashtab != NULL && 647 a->type == MAP_PRIVATE && swresv == 0 && 648 !(a->flags & MAP_NORESERVE) && 649 seg->s_as != &kas && a->vp->v_type == VREG); 650 651 ASSERT(!trok || !use_rgn); 652 } 653 654 /* 655 * If more than one segment in the address space, and they're adjacent 656 * virtually, try to concatenate them. Don't concatenate if an 657 * explicit anon_map structure was supplied (e.g., SystemV shared 658 * memory) or if we'll use text replication for this segment. 659 */ 660 if (a->amp == NULL && !use_rgn && !trok) { 661 struct seg *pseg, *nseg; 662 struct segvn_data *psvd, *nsvd; 663 lgrp_mem_policy_t ppolicy, npolicy; 664 uint_t lgrp_mem_policy_flags = 0; 665 extern lgrp_mem_policy_t lgrp_mem_default_policy; 666 667 /* 668 * Memory policy flags (lgrp_mem_policy_flags) is valid when 669 * extending stack/heap segments. 
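 * e.g. heap growth passes LGRP_MP_FLAG_EXTEND_UP and downward stack
 * growth passes LGRP_MP_FLAG_EXTEND_DOWN, so that the existing
 * segment's policy can be carried over to the extension whether or not
 * the two segments end up being concatenated.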
670 */ 671 if ((a->vp == NULL) && (a->type == MAP_PRIVATE) && 672 !(a->flags & MAP_NORESERVE) && (seg->s_as != &kas)) { 673 lgrp_mem_policy_flags = a->lgrp_mem_policy_flags; 674 } else { 675 /* 676 * Get policy when not extending it from another segment 677 */ 678 mpolicy = lgrp_mem_policy_default(seg->s_size, a->type); 679 } 680 681 /* 682 * First, try to concatenate the previous and new segments 683 */ 684 pseg = AS_SEGPREV(seg->s_as, seg); 685 if (pseg != NULL && 686 pseg->s_base + pseg->s_size == seg->s_base && 687 pseg->s_ops == &segvn_ops) { 688 /* 689 * Get memory allocation policy from previous segment. 690 * When extension is specified (e.g. for heap) apply 691 * this policy to the new segment regardless of the 692 * outcome of segment concatenation. Extension occurs 693 * for non-default policy otherwise default policy is 694 * used and is based on extended segment size. 695 */ 696 psvd = (struct segvn_data *)pseg->s_data; 697 ppolicy = psvd->policy_info.mem_policy; 698 if (lgrp_mem_policy_flags == 699 LGRP_MP_FLAG_EXTEND_UP) { 700 if (ppolicy != lgrp_mem_default_policy) { 701 mpolicy = ppolicy; 702 } else { 703 mpolicy = lgrp_mem_policy_default( 704 pseg->s_size + seg->s_size, 705 a->type); 706 } 707 } 708 709 if (mpolicy == ppolicy && 710 (pseg->s_size + seg->s_size <= 711 segvn_comb_thrshld || psvd->amp == NULL) && 712 segvn_extend_prev(pseg, seg, a, swresv) == 0) { 713 /* 714 * success! now try to concatenate 715 * with following seg 716 */ 717 crfree(cred); 718 nseg = AS_SEGNEXT(pseg->s_as, pseg); 719 if (nseg != NULL && 720 nseg != pseg && 721 nseg->s_ops == &segvn_ops && 722 pseg->s_base + pseg->s_size == 723 nseg->s_base) 724 (void) segvn_concat(pseg, nseg, 0); 725 ASSERT(pseg->s_szc == 0 || 726 (a->szc == pseg->s_szc && 727 IS_P2ALIGNED(pseg->s_base, pgsz) && 728 IS_P2ALIGNED(pseg->s_size, pgsz))); 729 return (0); 730 } 731 } 732 733 /* 734 * Failed, so try to concatenate with following seg 735 */ 736 nseg = AS_SEGNEXT(seg->s_as, seg); 737 if (nseg != NULL && 738 seg->s_base + seg->s_size == nseg->s_base && 739 nseg->s_ops == &segvn_ops) { 740 /* 741 * Get memory allocation policy from next segment. 742 * When extension is specified (e.g. for stack) apply 743 * this policy to the new segment regardless of the 744 * outcome of segment concatenation. Extension occurs 745 * for non-default policy otherwise default policy is 746 * used and is based on extended segment size. 747 */ 748 nsvd = (struct segvn_data *)nseg->s_data; 749 npolicy = nsvd->policy_info.mem_policy; 750 if (lgrp_mem_policy_flags == 751 LGRP_MP_FLAG_EXTEND_DOWN) { 752 if (npolicy != lgrp_mem_default_policy) { 753 mpolicy = npolicy; 754 } else { 755 mpolicy = lgrp_mem_policy_default( 756 nseg->s_size + seg->s_size, 757 a->type); 758 } 759 } 760 761 if (mpolicy == npolicy && 762 segvn_extend_next(seg, nseg, a, swresv) == 0) { 763 crfree(cred); 764 ASSERT(nseg->s_szc == 0 || 765 (a->szc == nseg->s_szc && 766 IS_P2ALIGNED(nseg->s_base, pgsz) && 767 IS_P2ALIGNED(nseg->s_size, pgsz))); 768 return (0); 769 } 770 } 771 } 772 773 if (a->vp != NULL) { 774 VN_HOLD(a->vp); 775 if (a->type == MAP_SHARED) 776 lgrp_shm_policy_init(NULL, a->vp); 777 } 778 svd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 779 780 seg->s_ops = &segvn_ops; 781 seg->s_data = (void *)svd; 782 seg->s_szc = a->szc; 783 784 svd->seg = seg; 785 svd->vp = a->vp; 786 /* 787 * Anonymous mappings have no backing file so the offset is meaningless. 788 */ 789 svd->offset = a->vp ? 
(a->offset & PAGEMASK) : 0; 790 svd->prot = a->prot; 791 svd->maxprot = a->maxprot; 792 svd->pageprot = 0; 793 svd->type = a->type; 794 svd->vpage = NULL; 795 svd->cred = cred; 796 svd->advice = MADV_NORMAL; 797 svd->pageadvice = 0; 798 svd->flags = (ushort_t)a->flags; 799 svd->softlockcnt = 0; 800 svd->softlockcnt_sbase = 0; 801 svd->softlockcnt_send = 0; 802 svd->rcookie = HAT_INVALID_REGION_COOKIE; 803 svd->pageswap = 0; 804 805 if (a->szc != 0 && a->vp != NULL) { 806 segvn_setvnode_mpss(a->vp); 807 } 808 if (svd->type == MAP_SHARED && svd->vp != NULL && 809 (svd->vp->v_flag & VVMEXEC) && (svd->prot & PROT_WRITE)) { 810 ASSERT(vn_is_mapped(svd->vp, V_WRITE)); 811 segvn_inval_trcache(svd->vp); 812 } 813 814 amp = a->amp; 815 if ((svd->amp = amp) == NULL) { 816 svd->anon_index = 0; 817 if (svd->type == MAP_SHARED) { 818 svd->swresv = 0; 819 /* 820 * Shared mappings to a vp need no other setup. 821 * If we have a shared mapping to an anon_map object 822 * which hasn't been allocated yet, allocate the 823 * struct now so that it will be properly shared 824 * by remembering the swap reservation there. 825 */ 826 if (a->vp == NULL) { 827 svd->amp = anonmap_alloc(seg->s_size, swresv, 828 ANON_SLEEP); 829 svd->amp->a_szc = seg->s_szc; 830 } 831 } else { 832 /* 833 * Private mapping (with or without a vp). 834 * Allocate anon_map when needed. 835 */ 836 svd->swresv = swresv; 837 } 838 } else { 839 pgcnt_t anon_num; 840 841 /* 842 * Mapping to an existing anon_map structure without a vp. 843 * For now we will insure that the segment size isn't larger 844 * than the size - offset gives us. Later on we may wish to 845 * have the anon array dynamically allocated itself so that 846 * we don't always have to allocate all the anon pointer slots. 847 * This of course involves adding extra code to check that we 848 * aren't trying to use an anon pointer slot beyond the end 849 * of the currently allocated anon array. 850 */ 851 if ((amp->size - a->offset) < seg->s_size) { 852 panic("segvn_create anon_map size"); 853 /*NOTREACHED*/ 854 } 855 856 anon_num = btopr(a->offset); 857 858 if (a->type == MAP_SHARED) { 859 /* 860 * SHARED mapping to a given anon_map. 861 */ 862 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 863 amp->refcnt++; 864 if (a->szc > amp->a_szc) { 865 amp->a_szc = a->szc; 866 } 867 ANON_LOCK_EXIT(&->a_rwlock); 868 svd->anon_index = anon_num; 869 svd->swresv = 0; 870 } else { 871 /* 872 * PRIVATE mapping to a given anon_map. 873 * Make sure that all the needed anon 874 * structures are created (so that we will 875 * share the underlying pages if nothing 876 * is written by this mapping) and then 877 * duplicate the anon array as is done 878 * when a privately mapped segment is dup'ed. 879 */ 880 struct anon *ap; 881 caddr_t addr; 882 caddr_t eaddr; 883 ulong_t anon_idx; 884 int hat_flag = HAT_LOAD; 885 886 if (svd->flags & MAP_TEXT) { 887 hat_flag |= HAT_LOAD_TEXT; 888 } 889 890 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 891 svd->amp->a_szc = seg->s_szc; 892 svd->anon_index = 0; 893 svd->swresv = swresv; 894 895 /* 896 * Prevent 2 threads from allocating anon 897 * slots simultaneously. 898 */ 899 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 900 eaddr = seg->s_base + seg->s_size; 901 902 for (anon_idx = anon_num, addr = seg->s_base; 903 addr < eaddr; addr += PAGESIZE, anon_idx++) { 904 page_t *pp; 905 906 if ((ap = anon_get_ptr(amp->ahp, 907 anon_idx)) != NULL) 908 continue; 909 910 /* 911 * Allocate the anon struct now. 
912 * Might as well load up translation 913 * to the page while we're at it... 914 */ 915 pp = anon_zero(seg, addr, &ap, cred); 916 if (ap == NULL || pp == NULL) { 917 panic("segvn_create anon_zero"); 918 /*NOTREACHED*/ 919 } 920 921 /* 922 * Re-acquire the anon_map lock and 923 * initialize the anon array entry. 924 */ 925 ASSERT(anon_get_ptr(amp->ahp, 926 anon_idx) == NULL); 927 (void) anon_set_ptr(amp->ahp, anon_idx, ap, 928 ANON_SLEEP); 929 930 ASSERT(seg->s_szc == 0); 931 ASSERT(!IS_VMODSORT(pp->p_vnode)); 932 933 ASSERT(use_rgn == 0); 934 hat_memload(seg->s_as->a_hat, addr, pp, 935 svd->prot & ~PROT_WRITE, hat_flag); 936 937 page_unlock(pp); 938 } 939 ASSERT(seg->s_szc == 0); 940 anon_dup(amp->ahp, anon_num, svd->amp->ahp, 941 0, seg->s_size); 942 ANON_LOCK_EXIT(&->a_rwlock); 943 } 944 } 945 946 /* 947 * Set default memory allocation policy for segment 948 * 949 * Always set policy for private memory at least for initialization 950 * even if this is a shared memory segment 951 */ 952 (void) lgrp_privm_policy_set(mpolicy, &svd->policy_info, seg->s_size); 953 954 if (svd->type == MAP_SHARED) 955 (void) lgrp_shm_policy_set(mpolicy, svd->amp, svd->anon_index, 956 svd->vp, svd->offset, seg->s_size); 957 958 if (use_rgn) { 959 ASSERT(!trok); 960 ASSERT(svd->amp == NULL); 961 svd->rcookie = hat_join_region(seg->s_as->a_hat, seg->s_base, 962 seg->s_size, (void *)svd->vp, svd->offset, svd->prot, 963 (uchar_t)seg->s_szc, segvn_hat_rgn_unload_callback, 964 HAT_REGION_TEXT); 965 } 966 967 ASSERT(!trok || !(svd->prot & PROT_WRITE)); 968 svd->tr_state = trok ? SEGVN_TR_INIT : SEGVN_TR_OFF; 969 970 return (0); 971 } 972 973 /* 974 * Concatenate two existing segments, if possible. 975 * Return 0 on success, -1 if two segments are not compatible 976 * or -2 on memory allocation failure. 977 * If amp_cat == 1 then try and concat segments with anon maps 978 */ 979 static int 980 segvn_concat(struct seg *seg1, struct seg *seg2, int amp_cat) 981 { 982 struct segvn_data *svd1 = seg1->s_data; 983 struct segvn_data *svd2 = seg2->s_data; 984 struct anon_map *amp1 = svd1->amp; 985 struct anon_map *amp2 = svd2->amp; 986 struct vpage *vpage1 = svd1->vpage; 987 struct vpage *vpage2 = svd2->vpage, *nvpage = NULL; 988 size_t size, nvpsize; 989 pgcnt_t npages1, npages2; 990 991 ASSERT(seg1->s_as && seg2->s_as && seg1->s_as == seg2->s_as); 992 ASSERT(AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock)); 993 ASSERT(seg1->s_ops == seg2->s_ops); 994 995 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie) || 996 HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) { 997 return (-1); 998 } 999 1000 /* both segments exist, try to merge them */ 1001 #define incompat(x) (svd1->x != svd2->x) 1002 if (incompat(vp) || incompat(maxprot) || 1003 (!svd1->pageadvice && !svd2->pageadvice && incompat(advice)) || 1004 (!svd1->pageprot && !svd2->pageprot && incompat(prot)) || 1005 incompat(type) || incompat(cred) || incompat(flags) || 1006 seg1->s_szc != seg2->s_szc || incompat(policy_info.mem_policy) || 1007 (svd2->softlockcnt > 0) || svd1->softlockcnt_send > 0) 1008 return (-1); 1009 #undef incompat 1010 1011 /* 1012 * vp == NULL implies zfod, offset doesn't matter 1013 */ 1014 if (svd1->vp != NULL && 1015 svd1->offset + seg1->s_size != svd2->offset) { 1016 return (-1); 1017 } 1018 1019 /* 1020 * Don't concatenate if either segment uses text replication. 
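 * (i.e. tr_state != SEGVN_TR_OFF for either segment: a replicated
 * segment's pages come from a per-lgroup amp that cannot simply be
 * merged with a neighboring segment.)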
1021 */ 1022 if (svd1->tr_state != SEGVN_TR_OFF || svd2->tr_state != SEGVN_TR_OFF) { 1023 return (-1); 1024 } 1025 1026 /* 1027 * Fail early if we're not supposed to concatenate 1028 * segments with non NULL amp. 1029 */ 1030 if (amp_cat == 0 && (amp1 != NULL || amp2 != NULL)) { 1031 return (-1); 1032 } 1033 1034 if (svd1->vp == NULL && svd1->type == MAP_SHARED) { 1035 if (amp1 != amp2) { 1036 return (-1); 1037 } 1038 if (amp1 != NULL && svd1->anon_index + btop(seg1->s_size) != 1039 svd2->anon_index) { 1040 return (-1); 1041 } 1042 ASSERT(amp1 == NULL || amp1->refcnt >= 2); 1043 } 1044 1045 /* 1046 * If either seg has vpages, create a new merged vpage array. 1047 */ 1048 if (vpage1 != NULL || vpage2 != NULL) { 1049 struct vpage *vp, *evp; 1050 1051 npages1 = seg_pages(seg1); 1052 npages2 = seg_pages(seg2); 1053 nvpsize = vpgtob(npages1 + npages2); 1054 1055 if ((nvpage = kmem_zalloc(nvpsize, KM_NOSLEEP)) == NULL) { 1056 return (-2); 1057 } 1058 1059 if (vpage1 != NULL) { 1060 bcopy(vpage1, nvpage, vpgtob(npages1)); 1061 } else { 1062 evp = nvpage + npages1; 1063 for (vp = nvpage; vp < evp; vp++) { 1064 VPP_SETPROT(vp, svd1->prot); 1065 VPP_SETADVICE(vp, svd1->advice); 1066 } 1067 } 1068 1069 if (vpage2 != NULL) { 1070 bcopy(vpage2, nvpage + npages1, vpgtob(npages2)); 1071 } else { 1072 evp = nvpage + npages1 + npages2; 1073 for (vp = nvpage + npages1; vp < evp; vp++) { 1074 VPP_SETPROT(vp, svd2->prot); 1075 VPP_SETADVICE(vp, svd2->advice); 1076 } 1077 } 1078 1079 if (svd2->pageswap && (!svd1->pageswap && svd1->swresv)) { 1080 ASSERT(svd1->swresv == seg1->s_size); 1081 ASSERT(!(svd1->flags & MAP_NORESERVE)); 1082 ASSERT(!(svd2->flags & MAP_NORESERVE)); 1083 evp = nvpage + npages1; 1084 for (vp = nvpage; vp < evp; vp++) { 1085 VPP_SETSWAPRES(vp); 1086 } 1087 } 1088 1089 if (svd1->pageswap && (!svd2->pageswap && svd2->swresv)) { 1090 ASSERT(svd2->swresv == seg2->s_size); 1091 ASSERT(!(svd1->flags & MAP_NORESERVE)); 1092 ASSERT(!(svd2->flags & MAP_NORESERVE)); 1093 vp = nvpage + npages1; 1094 evp = vp + npages2; 1095 for (; vp < evp; vp++) { 1096 VPP_SETSWAPRES(vp); 1097 } 1098 } 1099 } 1100 ASSERT((vpage1 != NULL || vpage2 != NULL) || 1101 (svd1->pageswap == 0 && svd2->pageswap == 0)); 1102 1103 /* 1104 * If either segment has private pages, create a new merged anon 1105 * array. If mergeing shared anon segments just decrement anon map's 1106 * refcnt. 1107 */ 1108 if (amp1 != NULL && svd1->type == MAP_SHARED) { 1109 ASSERT(amp1 == amp2 && svd1->vp == NULL); 1110 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER); 1111 ASSERT(amp1->refcnt >= 2); 1112 amp1->refcnt--; 1113 ANON_LOCK_EXIT(&1->a_rwlock); 1114 svd2->amp = NULL; 1115 } else if (amp1 != NULL || amp2 != NULL) { 1116 struct anon_hdr *nahp; 1117 struct anon_map *namp = NULL; 1118 size_t asize; 1119 1120 ASSERT(svd1->type == MAP_PRIVATE); 1121 1122 asize = seg1->s_size + seg2->s_size; 1123 if ((nahp = anon_create(btop(asize), ANON_NOSLEEP)) == NULL) { 1124 if (nvpage != NULL) { 1125 kmem_free(nvpage, nvpsize); 1126 } 1127 return (-2); 1128 } 1129 if (amp1 != NULL) { 1130 /* 1131 * XXX anon rwlock is not really needed because 1132 * this is a private segment and we are writers. 
1133 */ 1134 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER); 1135 ASSERT(amp1->refcnt == 1); 1136 if (anon_copy_ptr(amp1->ahp, svd1->anon_index, 1137 nahp, 0, btop(seg1->s_size), ANON_NOSLEEP)) { 1138 anon_release(nahp, btop(asize)); 1139 ANON_LOCK_EXIT(&1->a_rwlock); 1140 if (nvpage != NULL) { 1141 kmem_free(nvpage, nvpsize); 1142 } 1143 return (-2); 1144 } 1145 } 1146 if (amp2 != NULL) { 1147 ANON_LOCK_ENTER(&2->a_rwlock, RW_WRITER); 1148 ASSERT(amp2->refcnt == 1); 1149 if (anon_copy_ptr(amp2->ahp, svd2->anon_index, 1150 nahp, btop(seg1->s_size), btop(seg2->s_size), 1151 ANON_NOSLEEP)) { 1152 anon_release(nahp, btop(asize)); 1153 ANON_LOCK_EXIT(&2->a_rwlock); 1154 if (amp1 != NULL) { 1155 ANON_LOCK_EXIT(&1->a_rwlock); 1156 } 1157 if (nvpage != NULL) { 1158 kmem_free(nvpage, nvpsize); 1159 } 1160 return (-2); 1161 } 1162 } 1163 if (amp1 != NULL) { 1164 namp = amp1; 1165 anon_release(amp1->ahp, btop(amp1->size)); 1166 } 1167 if (amp2 != NULL) { 1168 if (namp == NULL) { 1169 ASSERT(amp1 == NULL); 1170 namp = amp2; 1171 anon_release(amp2->ahp, btop(amp2->size)); 1172 } else { 1173 amp2->refcnt--; 1174 ANON_LOCK_EXIT(&2->a_rwlock); 1175 anonmap_free(amp2); 1176 } 1177 svd2->amp = NULL; /* needed for seg_free */ 1178 } 1179 namp->ahp = nahp; 1180 namp->size = asize; 1181 svd1->amp = namp; 1182 svd1->anon_index = 0; 1183 ANON_LOCK_EXIT(&namp->a_rwlock); 1184 } 1185 /* 1186 * Now free the old vpage structures. 1187 */ 1188 if (nvpage != NULL) { 1189 if (vpage1 != NULL) { 1190 kmem_free(vpage1, vpgtob(npages1)); 1191 } 1192 if (vpage2 != NULL) { 1193 svd2->vpage = NULL; 1194 kmem_free(vpage2, vpgtob(npages2)); 1195 } 1196 if (svd2->pageprot) { 1197 svd1->pageprot = 1; 1198 } 1199 if (svd2->pageadvice) { 1200 svd1->pageadvice = 1; 1201 } 1202 if (svd2->pageswap) { 1203 svd1->pageswap = 1; 1204 } 1205 svd1->vpage = nvpage; 1206 } 1207 1208 /* all looks ok, merge segments */ 1209 svd1->swresv += svd2->swresv; 1210 svd2->swresv = 0; /* so seg_free doesn't release swap space */ 1211 size = seg2->s_size; 1212 seg_free(seg2); 1213 seg1->s_size += size; 1214 return (0); 1215 } 1216 1217 /* 1218 * Extend the previous segment (seg1) to include the 1219 * new segment (seg2 + a), if possible. 1220 * Return 0 on success. 1221 */ 1222 static int 1223 segvn_extend_prev(seg1, seg2, a, swresv) 1224 struct seg *seg1, *seg2; 1225 struct segvn_crargs *a; 1226 size_t swresv; 1227 { 1228 struct segvn_data *svd1 = (struct segvn_data *)seg1->s_data; 1229 size_t size; 1230 struct anon_map *amp1; 1231 struct vpage *new_vpage; 1232 1233 /* 1234 * We don't need any segment level locks for "segvn" data 1235 * since the address space is "write" locked. 
1236 */ 1237 ASSERT(seg1->s_as && AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock)); 1238 1239 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie)) { 1240 return (-1); 1241 } 1242 1243 /* second segment is new, try to extend first */ 1244 /* XXX - should also check cred */ 1245 if (svd1->vp != a->vp || svd1->maxprot != a->maxprot || 1246 (!svd1->pageprot && (svd1->prot != a->prot)) || 1247 svd1->type != a->type || svd1->flags != a->flags || 1248 seg1->s_szc != a->szc || svd1->softlockcnt_send > 0) 1249 return (-1); 1250 1251 /* vp == NULL implies zfod, offset doesn't matter */ 1252 if (svd1->vp != NULL && 1253 svd1->offset + seg1->s_size != (a->offset & PAGEMASK)) 1254 return (-1); 1255 1256 if (svd1->tr_state != SEGVN_TR_OFF) { 1257 return (-1); 1258 } 1259 1260 amp1 = svd1->amp; 1261 if (amp1) { 1262 pgcnt_t newpgs; 1263 1264 /* 1265 * Segment has private pages, can data structures 1266 * be expanded? 1267 * 1268 * Acquire the anon_map lock to prevent it from changing, 1269 * if it is shared. This ensures that the anon_map 1270 * will not change while a thread which has a read/write 1271 * lock on an address space references it. 1272 * XXX - Don't need the anon_map lock at all if "refcnt" 1273 * is 1. 1274 * 1275 * Can't grow a MAP_SHARED segment with an anonmap because 1276 * there may be existing anon slots where we want to extend 1277 * the segment and we wouldn't know what to do with them 1278 * (e.g., for tmpfs right thing is to just leave them there, 1279 * for /dev/zero they should be cleared out). 1280 */ 1281 if (svd1->type == MAP_SHARED) 1282 return (-1); 1283 1284 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER); 1285 if (amp1->refcnt > 1) { 1286 ANON_LOCK_EXIT(&1->a_rwlock); 1287 return (-1); 1288 } 1289 newpgs = anon_grow(amp1->ahp, &svd1->anon_index, 1290 btop(seg1->s_size), btop(seg2->s_size), ANON_NOSLEEP); 1291 1292 if (newpgs == 0) { 1293 ANON_LOCK_EXIT(&1->a_rwlock); 1294 return (-1); 1295 } 1296 amp1->size = ptob(newpgs); 1297 ANON_LOCK_EXIT(&1->a_rwlock); 1298 } 1299 if (svd1->vpage != NULL) { 1300 struct vpage *vp, *evp; 1301 new_vpage = 1302 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)), 1303 KM_NOSLEEP); 1304 if (new_vpage == NULL) 1305 return (-1); 1306 bcopy(svd1->vpage, new_vpage, vpgtob(seg_pages(seg1))); 1307 kmem_free(svd1->vpage, vpgtob(seg_pages(seg1))); 1308 svd1->vpage = new_vpage; 1309 1310 vp = new_vpage + seg_pages(seg1); 1311 evp = vp + seg_pages(seg2); 1312 for (; vp < evp; vp++) 1313 VPP_SETPROT(vp, a->prot); 1314 if (svd1->pageswap && swresv) { 1315 ASSERT(!(svd1->flags & MAP_NORESERVE)); 1316 ASSERT(swresv == seg2->s_size); 1317 vp = new_vpage + seg_pages(seg1); 1318 for (; vp < evp; vp++) { 1319 VPP_SETSWAPRES(vp); 1320 } 1321 } 1322 } 1323 ASSERT(svd1->vpage != NULL || svd1->pageswap == 0); 1324 size = seg2->s_size; 1325 seg_free(seg2); 1326 seg1->s_size += size; 1327 svd1->swresv += swresv; 1328 if (svd1->pageprot && (a->prot & PROT_WRITE) && 1329 svd1->type == MAP_SHARED && svd1->vp != NULL && 1330 (svd1->vp->v_flag & VVMEXEC)) { 1331 ASSERT(vn_is_mapped(svd1->vp, V_WRITE)); 1332 segvn_inval_trcache(svd1->vp); 1333 } 1334 return (0); 1335 } 1336 1337 /* 1338 * Extend the next segment (seg2) to include the 1339 * new segment (seg1 + a), if possible. 1340 * Return 0 on success. 
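 * This is the downward-growth analog of segvn_extend_prev(): on success
 * seg1 is freed, and seg2's base, offset and swap reservation are
 * adjusted so that seg2 covers the combined range.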
1341 */ 1342 static int 1343 segvn_extend_next( 1344 struct seg *seg1, 1345 struct seg *seg2, 1346 struct segvn_crargs *a, 1347 size_t swresv) 1348 { 1349 struct segvn_data *svd2 = (struct segvn_data *)seg2->s_data; 1350 size_t size; 1351 struct anon_map *amp2; 1352 struct vpage *new_vpage; 1353 1354 /* 1355 * We don't need any segment level locks for "segvn" data 1356 * since the address space is "write" locked. 1357 */ 1358 ASSERT(seg2->s_as && AS_WRITE_HELD(seg2->s_as, &seg2->s_as->a_lock)); 1359 1360 if (HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) { 1361 return (-1); 1362 } 1363 1364 /* first segment is new, try to extend second */ 1365 /* XXX - should also check cred */ 1366 if (svd2->vp != a->vp || svd2->maxprot != a->maxprot || 1367 (!svd2->pageprot && (svd2->prot != a->prot)) || 1368 svd2->type != a->type || svd2->flags != a->flags || 1369 seg2->s_szc != a->szc || svd2->softlockcnt_sbase > 0) 1370 return (-1); 1371 /* vp == NULL implies zfod, offset doesn't matter */ 1372 if (svd2->vp != NULL && 1373 (a->offset & PAGEMASK) + seg1->s_size != svd2->offset) 1374 return (-1); 1375 1376 if (svd2->tr_state != SEGVN_TR_OFF) { 1377 return (-1); 1378 } 1379 1380 amp2 = svd2->amp; 1381 if (amp2) { 1382 pgcnt_t newpgs; 1383 1384 /* 1385 * Segment has private pages, can data structures 1386 * be expanded? 1387 * 1388 * Acquire the anon_map lock to prevent it from changing, 1389 * if it is shared. This ensures that the anon_map 1390 * will not change while a thread which has a read/write 1391 * lock on an address space references it. 1392 * 1393 * XXX - Don't need the anon_map lock at all if "refcnt" 1394 * is 1. 1395 */ 1396 if (svd2->type == MAP_SHARED) 1397 return (-1); 1398 1399 ANON_LOCK_ENTER(&2->a_rwlock, RW_WRITER); 1400 if (amp2->refcnt > 1) { 1401 ANON_LOCK_EXIT(&2->a_rwlock); 1402 return (-1); 1403 } 1404 newpgs = anon_grow(amp2->ahp, &svd2->anon_index, 1405 btop(seg2->s_size), btop(seg1->s_size), 1406 ANON_NOSLEEP | ANON_GROWDOWN); 1407 1408 if (newpgs == 0) { 1409 ANON_LOCK_EXIT(&2->a_rwlock); 1410 return (-1); 1411 } 1412 amp2->size = ptob(newpgs); 1413 ANON_LOCK_EXIT(&2->a_rwlock); 1414 } 1415 if (svd2->vpage != NULL) { 1416 struct vpage *vp, *evp; 1417 new_vpage = 1418 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)), 1419 KM_NOSLEEP); 1420 if (new_vpage == NULL) { 1421 /* Not merging segments so adjust anon_index back */ 1422 if (amp2) 1423 svd2->anon_index += seg_pages(seg1); 1424 return (-1); 1425 } 1426 bcopy(svd2->vpage, new_vpage + seg_pages(seg1), 1427 vpgtob(seg_pages(seg2))); 1428 kmem_free(svd2->vpage, vpgtob(seg_pages(seg2))); 1429 svd2->vpage = new_vpage; 1430 1431 vp = new_vpage; 1432 evp = vp + seg_pages(seg1); 1433 for (; vp < evp; vp++) 1434 VPP_SETPROT(vp, a->prot); 1435 if (svd2->pageswap && swresv) { 1436 ASSERT(!(svd2->flags & MAP_NORESERVE)); 1437 ASSERT(swresv == seg1->s_size); 1438 vp = new_vpage; 1439 for (; vp < evp; vp++) { 1440 VPP_SETSWAPRES(vp); 1441 } 1442 } 1443 } 1444 ASSERT(svd2->vpage != NULL || svd2->pageswap == 0); 1445 size = seg1->s_size; 1446 seg_free(seg1); 1447 seg2->s_size += size; 1448 seg2->s_base -= size; 1449 svd2->offset -= size; 1450 svd2->swresv += swresv; 1451 if (svd2->pageprot && (a->prot & PROT_WRITE) && 1452 svd2->type == MAP_SHARED && svd2->vp != NULL && 1453 (svd2->vp->v_flag & VVMEXEC)) { 1454 ASSERT(vn_is_mapped(svd2->vp, V_WRITE)); 1455 segvn_inval_trcache(svd2->vp); 1456 } 1457 return (0); 1458 } 1459 1460 static int 1461 segvn_dup(struct seg *seg, struct seg *newseg) 1462 { 1463 struct segvn_data *svd = (struct 
segvn_data *)seg->s_data; 1464 struct segvn_data *newsvd; 1465 pgcnt_t npages = seg_pages(seg); 1466 int error = 0; 1467 uint_t prot; 1468 size_t len; 1469 struct anon_map *amp; 1470 1471 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1472 ASSERT(newseg->s_as->a_proc->p_parent == curproc); 1473 1474 /* 1475 * If segment has anon reserved, reserve more for the new seg. 1476 * For a MAP_NORESERVE segment swresv will be a count of all the 1477 * allocated anon slots; thus we reserve for the child as many slots 1478 * as the parent has allocated. This semantic prevents the child or 1479 * parent from dieing during a copy-on-write fault caused by trying 1480 * to write a shared pre-existing anon page. 1481 */ 1482 if ((len = svd->swresv) != 0) { 1483 if (anon_resv(svd->swresv) == 0) 1484 return (ENOMEM); 1485 1486 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 1487 seg, len, 0); 1488 } 1489 1490 newsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 1491 1492 newseg->s_ops = &segvn_ops; 1493 newseg->s_data = (void *)newsvd; 1494 newseg->s_szc = seg->s_szc; 1495 1496 newsvd->seg = newseg; 1497 if ((newsvd->vp = svd->vp) != NULL) { 1498 VN_HOLD(svd->vp); 1499 if (svd->type == MAP_SHARED) 1500 lgrp_shm_policy_init(NULL, svd->vp); 1501 } 1502 newsvd->offset = svd->offset; 1503 newsvd->prot = svd->prot; 1504 newsvd->maxprot = svd->maxprot; 1505 newsvd->pageprot = svd->pageprot; 1506 newsvd->type = svd->type; 1507 newsvd->cred = svd->cred; 1508 crhold(newsvd->cred); 1509 newsvd->advice = svd->advice; 1510 newsvd->pageadvice = svd->pageadvice; 1511 newsvd->swresv = svd->swresv; 1512 newsvd->pageswap = svd->pageswap; 1513 newsvd->flags = svd->flags; 1514 newsvd->softlockcnt = 0; 1515 newsvd->softlockcnt_sbase = 0; 1516 newsvd->softlockcnt_send = 0; 1517 newsvd->policy_info = svd->policy_info; 1518 newsvd->rcookie = HAT_INVALID_REGION_COOKIE; 1519 1520 if ((amp = svd->amp) == NULL || svd->tr_state == SEGVN_TR_ON) { 1521 /* 1522 * Not attaching to a shared anon object. 1523 */ 1524 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie) || 1525 svd->tr_state == SEGVN_TR_OFF); 1526 if (svd->tr_state == SEGVN_TR_ON) { 1527 ASSERT(newsvd->vp != NULL && amp != NULL); 1528 newsvd->tr_state = SEGVN_TR_INIT; 1529 } else { 1530 newsvd->tr_state = svd->tr_state; 1531 } 1532 newsvd->amp = NULL; 1533 newsvd->anon_index = 0; 1534 } else { 1535 /* regions for now are only used on pure vnode segments */ 1536 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 1537 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1538 newsvd->tr_state = SEGVN_TR_OFF; 1539 if (svd->type == MAP_SHARED) { 1540 newsvd->amp = amp; 1541 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 1542 amp->refcnt++; 1543 ANON_LOCK_EXIT(&->a_rwlock); 1544 newsvd->anon_index = svd->anon_index; 1545 } else { 1546 int reclaim = 1; 1547 1548 /* 1549 * Allocate and initialize new anon_map structure. 1550 */ 1551 newsvd->amp = anonmap_alloc(newseg->s_size, 0, 1552 ANON_SLEEP); 1553 newsvd->amp->a_szc = newseg->s_szc; 1554 newsvd->anon_index = 0; 1555 1556 /* 1557 * We don't have to acquire the anon_map lock 1558 * for the new segment (since it belongs to an 1559 * address space that is still not associated 1560 * with any process), or the segment in the old 1561 * address space (since all threads in it 1562 * are stopped while duplicating the address space). 1563 */ 1564 1565 /* 1566 * The goal of the following code is to make sure that 1567 * softlocked pages do not end up as copy on write 1568 * pages. 
This would cause problems where one 1569 * thread writes to a page that is COW and a different 1570 * thread in the same process has softlocked it. The 1571 * softlock lock would move away from this process 1572 * because the write would cause this process to get 1573 * a copy (without the softlock). 1574 * 1575 * The strategy here is to just break the 1576 * sharing on pages that could possibly be 1577 * softlocked. 1578 */ 1579 retry: 1580 if (svd->softlockcnt) { 1581 struct anon *ap, *newap; 1582 size_t i; 1583 uint_t vpprot; 1584 page_t *anon_pl[1+1], *pp; 1585 caddr_t addr; 1586 ulong_t old_idx = svd->anon_index; 1587 ulong_t new_idx = 0; 1588 1589 /* 1590 * The softlock count might be non zero 1591 * because some pages are still stuck in the 1592 * cache for lazy reclaim. Flush the cache 1593 * now. This should drop the count to zero. 1594 * [or there is really I/O going on to these 1595 * pages]. Note, we have the writers lock so 1596 * nothing gets inserted during the flush. 1597 */ 1598 if (reclaim == 1) { 1599 segvn_purge(seg); 1600 reclaim = 0; 1601 goto retry; 1602 } 1603 i = btopr(seg->s_size); 1604 addr = seg->s_base; 1605 /* 1606 * XXX break cow sharing using PAGESIZE 1607 * pages. They will be relocated into larger 1608 * pages at fault time. 1609 */ 1610 while (i-- > 0) { 1611 if (ap = anon_get_ptr(amp->ahp, 1612 old_idx)) { 1613 error = anon_getpage(&ap, 1614 &vpprot, anon_pl, PAGESIZE, 1615 seg, addr, S_READ, 1616 svd->cred); 1617 if (error) { 1618 newsvd->vpage = NULL; 1619 goto out; 1620 } 1621 /* 1622 * prot need not be computed 1623 * below 'cause anon_private is 1624 * going to ignore it anyway 1625 * as child doesn't inherit 1626 * pagelock from parent. 1627 */ 1628 prot = svd->pageprot ? 1629 VPP_PROT( 1630 &svd->vpage[ 1631 seg_page(seg, addr)]) 1632 : svd->prot; 1633 pp = anon_private(&newap, 1634 newseg, addr, prot, 1635 anon_pl[0], 0, 1636 newsvd->cred); 1637 if (pp == NULL) { 1638 /* no mem abort */ 1639 newsvd->vpage = NULL; 1640 error = ENOMEM; 1641 goto out; 1642 } 1643 (void) anon_set_ptr( 1644 newsvd->amp->ahp, new_idx, 1645 newap, ANON_SLEEP); 1646 page_unlock(pp); 1647 } 1648 addr += PAGESIZE; 1649 old_idx++; 1650 new_idx++; 1651 } 1652 } else { /* common case */ 1653 if (seg->s_szc != 0) { 1654 /* 1655 * If at least one of anon slots of a 1656 * large page exists then make sure 1657 * all anon slots of a large page 1658 * exist to avoid partial cow sharing 1659 * of a large page in the future. 1660 */ 1661 anon_dup_fill_holes(amp->ahp, 1662 svd->anon_index, newsvd->amp->ahp, 1663 0, seg->s_size, seg->s_szc, 1664 svd->vp != NULL); 1665 } else { 1666 anon_dup(amp->ahp, svd->anon_index, 1667 newsvd->amp->ahp, 0, seg->s_size); 1668 } 1669 1670 hat_clrattr(seg->s_as->a_hat, seg->s_base, 1671 seg->s_size, PROT_WRITE); 1672 } 1673 } 1674 } 1675 /* 1676 * If necessary, create a vpage structure for the new segment. 1677 * Do not copy any page lock indications. 
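 * (Page locks are not inherited across fork, so the child starts with
 * no locked pages; VPP_CLRPPLOCK() is applied to each copied entry
 * below.)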
1678 */ 1679 if (svd->vpage != NULL) { 1680 uint_t i; 1681 struct vpage *ovp = svd->vpage; 1682 struct vpage *nvp; 1683 1684 nvp = newsvd->vpage = 1685 kmem_alloc(vpgtob(npages), KM_SLEEP); 1686 for (i = 0; i < npages; i++) { 1687 *nvp = *ovp++; 1688 VPP_CLRPPLOCK(nvp++); 1689 } 1690 } else 1691 newsvd->vpage = NULL; 1692 1693 /* Inform the vnode of the new mapping */ 1694 if (newsvd->vp != NULL) { 1695 error = VOP_ADDMAP(newsvd->vp, (offset_t)newsvd->offset, 1696 newseg->s_as, newseg->s_base, newseg->s_size, newsvd->prot, 1697 newsvd->maxprot, newsvd->type, newsvd->cred, NULL); 1698 } 1699 out: 1700 if (error == 0 && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 1701 ASSERT(newsvd->amp == NULL); 1702 ASSERT(newsvd->tr_state == SEGVN_TR_OFF); 1703 newsvd->rcookie = svd->rcookie; 1704 hat_dup_region(newseg->s_as->a_hat, newsvd->rcookie); 1705 } 1706 return (error); 1707 } 1708 1709 1710 /* 1711 * callback function to invoke free_vp_pages() for only those pages actually 1712 * processed by the HAT when a shared region is destroyed. 1713 */ 1714 extern int free_pages; 1715 1716 static void 1717 segvn_hat_rgn_unload_callback(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr, 1718 size_t r_size, void *r_obj, u_offset_t r_objoff) 1719 { 1720 u_offset_t off; 1721 size_t len; 1722 vnode_t *vp = (vnode_t *)r_obj; 1723 1724 ASSERT(eaddr > saddr); 1725 ASSERT(saddr >= r_saddr); 1726 ASSERT(saddr < r_saddr + r_size); 1727 ASSERT(eaddr > r_saddr); 1728 ASSERT(eaddr <= r_saddr + r_size); 1729 ASSERT(vp != NULL); 1730 1731 if (!free_pages) { 1732 return; 1733 } 1734 1735 len = eaddr - saddr; 1736 off = (saddr - r_saddr) + r_objoff; 1737 free_vp_pages(vp, off, len); 1738 } 1739 1740 /* 1741 * callback function used by segvn_unmap to invoke free_vp_pages() for only 1742 * those pages actually processed by the HAT 1743 */ 1744 static void 1745 segvn_hat_unload_callback(hat_callback_t *cb) 1746 { 1747 struct seg *seg = cb->hcb_data; 1748 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 1749 size_t len; 1750 u_offset_t off; 1751 1752 ASSERT(svd->vp != NULL); 1753 ASSERT(cb->hcb_end_addr > cb->hcb_start_addr); 1754 ASSERT(cb->hcb_start_addr >= seg->s_base); 1755 1756 len = cb->hcb_end_addr - cb->hcb_start_addr; 1757 off = cb->hcb_start_addr - seg->s_base; 1758 free_vp_pages(svd->vp, svd->offset + off, len); 1759 } 1760 1761 /* 1762 * This function determines the number of bytes of swap reserved by 1763 * a segment for which per-page accounting is present. It is used to 1764 * calculate the correct value of a segvn_data's swresv. 
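 * The result is simply the number of vpages with VPP_ISSWAPRES() set,
 * converted to bytes.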
1765 */ 1766 static size_t 1767 segvn_count_swap_by_vpages(struct seg *seg) 1768 { 1769 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 1770 struct vpage *vp, *evp; 1771 size_t nswappages = 0; 1772 1773 ASSERT(svd->pageswap); 1774 ASSERT(svd->vpage != NULL); 1775 1776 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)]; 1777 1778 for (vp = svd->vpage; vp < evp; vp++) { 1779 if (VPP_ISSWAPRES(vp)) 1780 nswappages++; 1781 } 1782 1783 return (nswappages << PAGESHIFT); 1784 } 1785 1786 static int 1787 segvn_unmap(struct seg *seg, caddr_t addr, size_t len) 1788 { 1789 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 1790 struct segvn_data *nsvd; 1791 struct seg *nseg; 1792 struct anon_map *amp; 1793 pgcnt_t opages; /* old segment size in pages */ 1794 pgcnt_t npages; /* new segment size in pages */ 1795 pgcnt_t dpages; /* pages being deleted (unmapped) */ 1796 hat_callback_t callback; /* used for free_vp_pages() */ 1797 hat_callback_t *cbp = NULL; 1798 caddr_t nbase; 1799 size_t nsize; 1800 size_t oswresv; 1801 int reclaim = 1; 1802 1803 /* 1804 * We don't need any segment level locks for "segvn" data 1805 * since the address space is "write" locked. 1806 */ 1807 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1808 1809 /* 1810 * Fail the unmap if pages are SOFTLOCKed through this mapping. 1811 * softlockcnt is protected from change by the as write lock. 1812 */ 1813 retry: 1814 if (svd->softlockcnt > 0) { 1815 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1816 1817 /* 1818 * If this is shared segment non 0 softlockcnt 1819 * means locked pages are still in use. 1820 */ 1821 if (svd->type == MAP_SHARED) { 1822 return (EAGAIN); 1823 } 1824 1825 /* 1826 * since we do have the writers lock nobody can fill 1827 * the cache during the purge. The flush either succeeds 1828 * or we still have pending I/Os. 1829 */ 1830 if (reclaim == 1) { 1831 segvn_purge(seg); 1832 reclaim = 0; 1833 goto retry; 1834 } 1835 return (EAGAIN); 1836 } 1837 1838 /* 1839 * Check for bad sizes 1840 */ 1841 if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size || 1842 (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) { 1843 panic("segvn_unmap"); 1844 /*NOTREACHED*/ 1845 } 1846 1847 if (seg->s_szc != 0) { 1848 size_t pgsz = page_get_pagesize(seg->s_szc); 1849 int err; 1850 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) { 1851 ASSERT(seg->s_base != addr || seg->s_size != len); 1852 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 1853 ASSERT(svd->amp == NULL); 1854 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1855 hat_leave_region(seg->s_as->a_hat, 1856 svd->rcookie, HAT_REGION_TEXT); 1857 svd->rcookie = HAT_INVALID_REGION_COOKIE; 1858 /* 1859 * could pass a flag to segvn_demote_range() 1860 * below to tell it not to do any unloads but 1861 * this case is rare enough to not bother for 1862 * now. 1863 */ 1864 } else if (svd->tr_state == SEGVN_TR_INIT) { 1865 svd->tr_state = SEGVN_TR_OFF; 1866 } else if (svd->tr_state == SEGVN_TR_ON) { 1867 ASSERT(svd->amp != NULL); 1868 segvn_textunrepl(seg, 1); 1869 ASSERT(svd->amp == NULL); 1870 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1871 } 1872 VM_STAT_ADD(segvnvmstats.demoterange[0]); 1873 err = segvn_demote_range(seg, addr, len, SDR_END, 0); 1874 if (err == 0) { 1875 return (IE_RETRY); 1876 } 1877 return (err); 1878 } 1879 } 1880 1881 /* Inform the vnode of the unmapping. 
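 * Only an EAGAIN return from VOP_DELMAP() aborts the unmap; any other
 * error is ignored.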
*/ 1882 if (svd->vp) { 1883 int error; 1884 1885 error = VOP_DELMAP(svd->vp, 1886 (offset_t)svd->offset + (uintptr_t)(addr - seg->s_base), 1887 seg->s_as, addr, len, svd->prot, svd->maxprot, 1888 svd->type, svd->cred, NULL); 1889 1890 if (error == EAGAIN) 1891 return (error); 1892 } 1893 1894 /* 1895 * Remove any page locks set through this mapping. 1896 * If text replication is not off no page locks could have been 1897 * established via this mapping. 1898 */ 1899 if (svd->tr_state == SEGVN_TR_OFF) { 1900 (void) segvn_lockop(seg, addr, len, 0, MC_UNLOCK, NULL, 0); 1901 } 1902 1903 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 1904 ASSERT(svd->amp == NULL); 1905 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1906 ASSERT(svd->type == MAP_PRIVATE); 1907 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 1908 HAT_REGION_TEXT); 1909 svd->rcookie = HAT_INVALID_REGION_COOKIE; 1910 } else if (svd->tr_state == SEGVN_TR_ON) { 1911 ASSERT(svd->amp != NULL); 1912 ASSERT(svd->pageprot == 0 && !(svd->prot & PROT_WRITE)); 1913 segvn_textunrepl(seg, 1); 1914 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 1915 } else { 1916 if (svd->tr_state != SEGVN_TR_OFF) { 1917 ASSERT(svd->tr_state == SEGVN_TR_INIT); 1918 svd->tr_state = SEGVN_TR_OFF; 1919 } 1920 /* 1921 * Unload any hardware translations in the range to be taken 1922 * out. Use a callback to invoke free_vp_pages() effectively. 1923 */ 1924 if (svd->vp != NULL && free_pages != 0) { 1925 callback.hcb_data = seg; 1926 callback.hcb_function = segvn_hat_unload_callback; 1927 cbp = &callback; 1928 } 1929 hat_unload_callback(seg->s_as->a_hat, addr, len, 1930 HAT_UNLOAD_UNMAP, cbp); 1931 1932 if (svd->type == MAP_SHARED && svd->vp != NULL && 1933 (svd->vp->v_flag & VVMEXEC) && 1934 ((svd->prot & PROT_WRITE) || svd->pageprot)) { 1935 segvn_inval_trcache(svd->vp); 1936 } 1937 } 1938 1939 /* 1940 * Check for entire segment 1941 */ 1942 if (addr == seg->s_base && len == seg->s_size) { 1943 seg_free(seg); 1944 return (0); 1945 } 1946 1947 opages = seg_pages(seg); 1948 dpages = btop(len); 1949 npages = opages - dpages; 1950 amp = svd->amp; 1951 ASSERT(amp == NULL || amp->a_szc >= seg->s_szc); 1952 1953 /* 1954 * Check for beginning of segment 1955 */ 1956 if (addr == seg->s_base) { 1957 if (svd->vpage != NULL) { 1958 size_t nbytes; 1959 struct vpage *ovpage; 1960 1961 ovpage = svd->vpage; /* keep pointer to vpage */ 1962 1963 nbytes = vpgtob(npages); 1964 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 1965 bcopy(&ovpage[dpages], svd->vpage, nbytes); 1966 1967 /* free up old vpage */ 1968 kmem_free(ovpage, vpgtob(opages)); 1969 } 1970 if (amp != NULL) { 1971 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 1972 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 1973 /* 1974 * Shared anon map is no longer in use. Before 1975 * freeing its pages purge all entries from 1976 * pcache that belong to this amp. 1977 */ 1978 if (svd->type == MAP_SHARED) { 1979 ASSERT(amp->refcnt == 1); 1980 ASSERT(svd->softlockcnt == 0); 1981 anonmap_purge(amp); 1982 } 1983 /* 1984 * Free up now unused parts of anon_map array. 
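 *
 * Bookkeeping sketch for this "unmap at the front" case (it restates
 * the code below): the anon slots released are
 *
 *	[svd->anon_index, svd->anon_index + btop(len))
 *
 * after which the segment slides forward past the hole:
 *
 *	svd->anon_index += dpages;	dpages == btop(len)
 *	svd->offset += len;		vnode-backed only
 *	seg->s_base += len;
 *	seg->s_size -= len;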
1985 */ 1986 if (amp->a_szc == seg->s_szc) { 1987 if (seg->s_szc != 0) { 1988 anon_free_pages(amp->ahp, 1989 svd->anon_index, len, 1990 seg->s_szc); 1991 } else { 1992 anon_free(amp->ahp, 1993 svd->anon_index, 1994 len); 1995 } 1996 } else { 1997 ASSERT(svd->type == MAP_SHARED); 1998 ASSERT(amp->a_szc > seg->s_szc); 1999 anon_shmap_free_pages(amp, 2000 svd->anon_index, len); 2001 } 2002 2003 /* 2004 * Unreserve swap space for the 2005 * unmapped chunk of this segment in 2006 * case it's MAP_SHARED 2007 */ 2008 if (svd->type == MAP_SHARED) { 2009 anon_unresv_zone(len, 2010 seg->s_as->a_proc->p_zone); 2011 amp->swresv -= len; 2012 } 2013 } 2014 ANON_LOCK_EXIT(&->a_rwlock); 2015 svd->anon_index += dpages; 2016 } 2017 if (svd->vp != NULL) 2018 svd->offset += len; 2019 2020 seg->s_base += len; 2021 seg->s_size -= len; 2022 2023 if (svd->swresv) { 2024 if (svd->flags & MAP_NORESERVE) { 2025 ASSERT(amp); 2026 oswresv = svd->swresv; 2027 2028 svd->swresv = ptob(anon_pages(amp->ahp, 2029 svd->anon_index, npages)); 2030 anon_unresv_zone(oswresv - svd->swresv, 2031 seg->s_as->a_proc->p_zone); 2032 } else { 2033 size_t unlen; 2034 2035 if (svd->pageswap) { 2036 oswresv = svd->swresv; 2037 svd->swresv = 2038 segvn_count_swap_by_vpages(seg); 2039 ASSERT(oswresv >= svd->swresv); 2040 unlen = oswresv - svd->swresv; 2041 } else { 2042 svd->swresv -= len; 2043 ASSERT(svd->swresv == seg->s_size); 2044 unlen = len; 2045 } 2046 anon_unresv_zone(unlen, 2047 seg->s_as->a_proc->p_zone); 2048 } 2049 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 2050 seg, len, 0); 2051 } 2052 2053 return (0); 2054 } 2055 2056 /* 2057 * Check for end of segment 2058 */ 2059 if (addr + len == seg->s_base + seg->s_size) { 2060 if (svd->vpage != NULL) { 2061 size_t nbytes; 2062 struct vpage *ovpage; 2063 2064 ovpage = svd->vpage; /* keep pointer to vpage */ 2065 2066 nbytes = vpgtob(npages); 2067 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2068 bcopy(ovpage, svd->vpage, nbytes); 2069 2070 /* free up old vpage */ 2071 kmem_free(ovpage, vpgtob(opages)); 2072 2073 } 2074 if (amp != NULL) { 2075 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 2076 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 2077 /* 2078 * Free up now unused parts of anon_map array. 2079 */ 2080 ulong_t an_idx = svd->anon_index + npages; 2081 2082 /* 2083 * Shared anon map is no longer in use. Before 2084 * freeing its pages purge all entries from 2085 * pcache that belong to this amp. 
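 *
 * (Illustrative note: in this "unmap at the end" case the slots being
 * released start at
 *
 *	an_idx = svd->anon_index + npages
 *
 * i.e. just past the pages that stay mapped, so neither anon_index,
 * offset nor s_base moves; only s_size is reduced further down.)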
2086 */ 2087 if (svd->type == MAP_SHARED) { 2088 ASSERT(amp->refcnt == 1); 2089 ASSERT(svd->softlockcnt == 0); 2090 anonmap_purge(amp); 2091 } 2092 2093 if (amp->a_szc == seg->s_szc) { 2094 if (seg->s_szc != 0) { 2095 anon_free_pages(amp->ahp, 2096 an_idx, len, 2097 seg->s_szc); 2098 } else { 2099 anon_free(amp->ahp, an_idx, 2100 len); 2101 } 2102 } else { 2103 ASSERT(svd->type == MAP_SHARED); 2104 ASSERT(amp->a_szc > seg->s_szc); 2105 anon_shmap_free_pages(amp, 2106 an_idx, len); 2107 } 2108 2109 /* 2110 * Unreserve swap space for the 2111 * unmapped chunk of this segment in 2112 * case it's MAP_SHARED 2113 */ 2114 if (svd->type == MAP_SHARED) { 2115 anon_unresv_zone(len, 2116 seg->s_as->a_proc->p_zone); 2117 amp->swresv -= len; 2118 } 2119 } 2120 ANON_LOCK_EXIT(&->a_rwlock); 2121 } 2122 2123 seg->s_size -= len; 2124 2125 if (svd->swresv) { 2126 if (svd->flags & MAP_NORESERVE) { 2127 ASSERT(amp); 2128 oswresv = svd->swresv; 2129 svd->swresv = ptob(anon_pages(amp->ahp, 2130 svd->anon_index, npages)); 2131 anon_unresv_zone(oswresv - svd->swresv, 2132 seg->s_as->a_proc->p_zone); 2133 } else { 2134 size_t unlen; 2135 2136 if (svd->pageswap) { 2137 oswresv = svd->swresv; 2138 svd->swresv = 2139 segvn_count_swap_by_vpages(seg); 2140 ASSERT(oswresv >= svd->swresv); 2141 unlen = oswresv - svd->swresv; 2142 } else { 2143 svd->swresv -= len; 2144 ASSERT(svd->swresv == seg->s_size); 2145 unlen = len; 2146 } 2147 anon_unresv_zone(unlen, 2148 seg->s_as->a_proc->p_zone); 2149 } 2150 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 2151 "anon proc:%p %lu %u", seg, len, 0); 2152 } 2153 2154 return (0); 2155 } 2156 2157 /* 2158 * The section to go is in the middle of the segment, 2159 * have to make it into two segments. nseg is made for 2160 * the high end while seg is cut down at the low end. 2161 */ 2162 nbase = addr + len; /* new seg base */ 2163 nsize = (seg->s_base + seg->s_size) - nbase; /* new seg size */ 2164 seg->s_size = addr - seg->s_base; /* shrink old seg */ 2165 nseg = seg_alloc(seg->s_as, nbase, nsize); 2166 if (nseg == NULL) { 2167 panic("segvn_unmap seg_alloc"); 2168 /*NOTREACHED*/ 2169 } 2170 nseg->s_ops = seg->s_ops; 2171 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 2172 nseg->s_data = (void *)nsvd; 2173 nseg->s_szc = seg->s_szc; 2174 *nsvd = *svd; 2175 nsvd->seg = nseg; 2176 nsvd->offset = svd->offset + (uintptr_t)(nseg->s_base - seg->s_base); 2177 nsvd->swresv = 0; 2178 nsvd->softlockcnt = 0; 2179 nsvd->softlockcnt_sbase = 0; 2180 nsvd->softlockcnt_send = 0; 2181 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE); 2182 2183 if (svd->vp != NULL) { 2184 VN_HOLD(nsvd->vp); 2185 if (nsvd->type == MAP_SHARED) 2186 lgrp_shm_policy_init(NULL, nsvd->vp); 2187 } 2188 crhold(svd->cred); 2189 2190 if (svd->vpage == NULL) { 2191 nsvd->vpage = NULL; 2192 } else { 2193 /* need to split vpage into two arrays */ 2194 size_t nbytes; 2195 struct vpage *ovpage; 2196 2197 ovpage = svd->vpage; /* keep pointer to vpage */ 2198 2199 npages = seg_pages(seg); /* seg has shrunk */ 2200 nbytes = vpgtob(npages); 2201 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2202 2203 bcopy(ovpage, svd->vpage, nbytes); 2204 2205 npages = seg_pages(nseg); 2206 nbytes = vpgtob(npages); 2207 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2208 2209 bcopy(&ovpage[opages - npages], nsvd->vpage, nbytes); 2210 2211 /* free up old vpage */ 2212 kmem_free(ovpage, vpgtob(opages)); 2213 } 2214 2215 if (amp == NULL) { 2216 nsvd->amp = NULL; 2217 nsvd->anon_index = 0; 2218 } else { 2219 /* 2220 * Need to create a new anon map for the new segment. 
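 * (Geometry of this middle-cut case, sketched for reference:
 *
 *	low half  (seg):   [old s_base, addr)
 *	hole:              [addr, addr + len)
 *	high half (nseg):  [addr + len, old s_base + old s_size)
 *
 * nsvd->offset was already advanced by (nseg->s_base - old s_base)
 * above, and nsvd->anon_index gets the matching btop() adjustment
 * below.)
 *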
2221 * We'll also allocate a new smaller array for the old 2222 * smaller segment to save space. 2223 */ 2224 opages = btop((uintptr_t)(addr - seg->s_base)); 2225 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 2226 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 2227 /* 2228 * Free up now unused parts of anon_map array. 2229 */ 2230 ulong_t an_idx = svd->anon_index + opages; 2231 2232 /* 2233 * Shared anon map is no longer in use. Before 2234 * freeing its pages purge all entries from 2235 * pcache that belong to this amp. 2236 */ 2237 if (svd->type == MAP_SHARED) { 2238 ASSERT(amp->refcnt == 1); 2239 ASSERT(svd->softlockcnt == 0); 2240 anonmap_purge(amp); 2241 } 2242 2243 if (amp->a_szc == seg->s_szc) { 2244 if (seg->s_szc != 0) { 2245 anon_free_pages(amp->ahp, an_idx, len, 2246 seg->s_szc); 2247 } else { 2248 anon_free(amp->ahp, an_idx, 2249 len); 2250 } 2251 } else { 2252 ASSERT(svd->type == MAP_SHARED); 2253 ASSERT(amp->a_szc > seg->s_szc); 2254 anon_shmap_free_pages(amp, an_idx, len); 2255 } 2256 2257 /* 2258 * Unreserve swap space for the 2259 * unmapped chunk of this segment in 2260 * case it's MAP_SHARED 2261 */ 2262 if (svd->type == MAP_SHARED) { 2263 anon_unresv_zone(len, 2264 seg->s_as->a_proc->p_zone); 2265 amp->swresv -= len; 2266 } 2267 } 2268 nsvd->anon_index = svd->anon_index + 2269 btop((uintptr_t)(nseg->s_base - seg->s_base)); 2270 if (svd->type == MAP_SHARED) { 2271 amp->refcnt++; 2272 nsvd->amp = amp; 2273 } else { 2274 struct anon_map *namp; 2275 struct anon_hdr *nahp; 2276 2277 ASSERT(svd->type == MAP_PRIVATE); 2278 nahp = anon_create(btop(seg->s_size), ANON_SLEEP); 2279 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP); 2280 namp->a_szc = seg->s_szc; 2281 (void) anon_copy_ptr(amp->ahp, svd->anon_index, nahp, 2282 0, btop(seg->s_size), ANON_SLEEP); 2283 (void) anon_copy_ptr(amp->ahp, nsvd->anon_index, 2284 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP); 2285 anon_release(amp->ahp, btop(amp->size)); 2286 svd->anon_index = 0; 2287 nsvd->anon_index = 0; 2288 amp->ahp = nahp; 2289 amp->size = seg->s_size; 2290 nsvd->amp = namp; 2291 } 2292 ANON_LOCK_EXIT(&->a_rwlock); 2293 } 2294 if (svd->swresv) { 2295 if (svd->flags & MAP_NORESERVE) { 2296 ASSERT(amp); 2297 oswresv = svd->swresv; 2298 svd->swresv = ptob(anon_pages(amp->ahp, 2299 svd->anon_index, btop(seg->s_size))); 2300 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp, 2301 nsvd->anon_index, btop(nseg->s_size))); 2302 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 2303 anon_unresv_zone(oswresv - (svd->swresv + nsvd->swresv), 2304 seg->s_as->a_proc->p_zone); 2305 } else { 2306 size_t unlen; 2307 2308 if (svd->pageswap) { 2309 oswresv = svd->swresv; 2310 svd->swresv = segvn_count_swap_by_vpages(seg); 2311 nsvd->swresv = segvn_count_swap_by_vpages(nseg); 2312 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 2313 unlen = oswresv - (svd->swresv + nsvd->swresv); 2314 } else { 2315 if (seg->s_size + nseg->s_size + len != 2316 svd->swresv) { 2317 panic("segvn_unmap: cannot split " 2318 "swap reservation"); 2319 /*NOTREACHED*/ 2320 } 2321 svd->swresv = seg->s_size; 2322 nsvd->swresv = nseg->s_size; 2323 unlen = len; 2324 } 2325 anon_unresv_zone(unlen, 2326 seg->s_as->a_proc->p_zone); 2327 } 2328 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 2329 seg, len, 0); 2330 } 2331 2332 return (0); /* I'm glad that's all over with! 
*/ 2333 } 2334 2335 static void 2336 segvn_free(struct seg *seg) 2337 { 2338 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2339 pgcnt_t npages = seg_pages(seg); 2340 struct anon_map *amp; 2341 size_t len; 2342 2343 /* 2344 * We don't need any segment level locks for "segvn" data 2345 * since the address space is "write" locked. 2346 */ 2347 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 2348 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2349 2350 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2351 2352 /* 2353 * Be sure to unlock pages. XXX Why do things get free'ed instead 2354 * of unmapped? XXX 2355 */ 2356 (void) segvn_lockop(seg, seg->s_base, seg->s_size, 2357 0, MC_UNLOCK, NULL, 0); 2358 2359 /* 2360 * Deallocate the vpage and anon pointers if necessary and possible. 2361 */ 2362 if (svd->vpage != NULL) { 2363 kmem_free(svd->vpage, vpgtob(npages)); 2364 svd->vpage = NULL; 2365 } 2366 if ((amp = svd->amp) != NULL) { 2367 /* 2368 * If there are no more references to this anon_map 2369 * structure, then deallocate the structure after freeing 2370 * up all the anon slot pointers that we can. 2371 */ 2372 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER); 2373 ASSERT(amp->a_szc >= seg->s_szc); 2374 if (--amp->refcnt == 0) { 2375 if (svd->type == MAP_PRIVATE) { 2376 /* 2377 * Private - we only need to anon_free 2378 * the part that this segment refers to. 2379 */ 2380 if (seg->s_szc != 0) { 2381 anon_free_pages(amp->ahp, 2382 svd->anon_index, seg->s_size, 2383 seg->s_szc); 2384 } else { 2385 anon_free(amp->ahp, svd->anon_index, 2386 seg->s_size); 2387 } 2388 } else { 2389 2390 /* 2391 * Shared anon map is no longer in use. Before 2392 * freeing its pages purge all entries from 2393 * pcache that belong to this amp. 2394 */ 2395 ASSERT(svd->softlockcnt == 0); 2396 anonmap_purge(amp); 2397 2398 /* 2399 * Shared - anon_free the entire 2400 * anon_map's worth of stuff and 2401 * release any swap reservation. 2402 */ 2403 if (amp->a_szc != 0) { 2404 anon_shmap_free_pages(amp, 0, 2405 amp->size); 2406 } else { 2407 anon_free(amp->ahp, 0, amp->size); 2408 } 2409 if ((len = amp->swresv) != 0) { 2410 anon_unresv_zone(len, 2411 seg->s_as->a_proc->p_zone); 2412 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 2413 "anon proc:%p %lu %u", seg, len, 0); 2414 } 2415 } 2416 svd->amp = NULL; 2417 ANON_LOCK_EXIT(&amp->a_rwlock); 2418 anonmap_free(amp); 2419 } else if (svd->type == MAP_PRIVATE) { 2420 /* 2421 * We had a private mapping which still has 2422 * a held anon_map so just free up all the 2423 * anon slot pointers that we were using. 2424 */ 2425 if (seg->s_szc != 0) { 2426 anon_free_pages(amp->ahp, svd->anon_index, 2427 seg->s_size, seg->s_szc); 2428 } else { 2429 anon_free(amp->ahp, svd->anon_index, 2430 seg->s_size); 2431 } 2432 ANON_LOCK_EXIT(&amp->a_rwlock); 2433 } else { 2434 ANON_LOCK_EXIT(&amp->a_rwlock); 2435 } 2436 } 2437 2438 /* 2439 * Release swap reservation. 2440 */ 2441 if ((len = svd->swresv) != 0) { 2442 anon_unresv_zone(svd->swresv, 2443 seg->s_as->a_proc->p_zone); 2444 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 2445 seg, len, 0); 2446 svd->swresv = 0; 2447 } 2448 /* 2449 * Release claim on vnode, credentials, and finally free the 2450 * private data.
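 *
 * Teardown order below, sketched (it only restates the code that
 * follows):
 *
 *	lgrp_shm_policy_fini(NULL, svd->vp);	MAP_SHARED only
 *	VN_RELE(svd->vp);
 *	crfree(svd->cred);
 *	... enter and exit segfree_syncmtx ...
 *	kmem_cache_free(segvn_cache, svd);
 *
 * The empty acquire/release of segfree_syncmtx lets an asynchronous
 * segvn_reclaim() that still references this segment drain before the
 * private data is freed.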
*/ 2452 if (svd->vp != NULL) { 2453 if (svd->type == MAP_SHARED) 2454 lgrp_shm_policy_fini(NULL, svd->vp); 2455 VN_RELE(svd->vp); 2456 svd->vp = NULL; 2457 } 2458 crfree(svd->cred); 2459 svd->pageprot = 0; 2460 svd->pageadvice = 0; 2461 svd->pageswap = 0; 2462 svd->cred = NULL; 2463 2464 /* 2465 * Take segfree_syncmtx lock to let segvn_reclaim() finish if it's 2466 * still working with this segment without holding as lock (in case 2467 * it's called by pcache async thread). 2468 */ 2469 ASSERT(svd->softlockcnt == 0); 2470 mutex_enter(&svd->segfree_syncmtx); 2471 mutex_exit(&svd->segfree_syncmtx); 2472 2473 seg->s_data = NULL; 2474 kmem_cache_free(segvn_cache, svd); 2475 } 2476 2477 /* 2478 * Do a F_SOFTUNLOCK call over the range requested. The range must have 2479 * already been F_SOFTLOCK'ed. 2480 * Caller must always match addr and len of a softunlock with a previous 2481 * softlock with exactly the same addr and len. 2482 */ 2483 static void 2484 segvn_softunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw) 2485 { 2486 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2487 page_t *pp; 2488 caddr_t adr; 2489 struct vnode *vp; 2490 u_offset_t offset; 2491 ulong_t anon_index; 2492 struct anon_map *amp; 2493 struct anon *ap = NULL; 2494 2495 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2496 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 2497 2498 if ((amp = svd->amp) != NULL) 2499 anon_index = svd->anon_index + seg_page(seg, addr); 2500 2501 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 2502 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2503 hat_unlock_region(seg->s_as->a_hat, addr, len, svd->rcookie); 2504 } else { 2505 hat_unlock(seg->s_as->a_hat, addr, len); 2506 } 2507 for (adr = addr; adr < addr + len; adr += PAGESIZE) { 2508 if (amp != NULL) { 2509 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 2510 if ((ap = anon_get_ptr(amp->ahp, anon_index++)) 2511 != NULL) { 2512 swap_xlate(ap, &vp, &offset); 2513 } else { 2514 vp = svd->vp; 2515 offset = svd->offset + 2516 (uintptr_t)(adr - seg->s_base); 2517 } 2518 ANON_LOCK_EXIT(&amp->a_rwlock); 2519 } else { 2520 vp = svd->vp; 2521 offset = svd->offset + 2522 (uintptr_t)(adr - seg->s_base); 2523 } 2524 2525 /* 2526 * Use page_find() instead of page_lookup() to 2527 * find the page since we know that it is locked. 2528 */ 2529 pp = page_find(vp, offset); 2530 if (pp == NULL) { 2531 panic( 2532 "segvn_softunlock: addr %p, ap %p, vp %p, off %llx", 2533 (void *)adr, (void *)ap, (void *)vp, offset); 2534 /*NOTREACHED*/ 2535 } 2536 2537 if (rw == S_WRITE) { 2538 hat_setrefmod(pp); 2539 if (seg->s_as->a_vbits) 2540 hat_setstat(seg->s_as, adr, PAGESIZE, 2541 P_REF | P_MOD); 2542 } else if (rw != S_OTHER) { 2543 hat_setref(pp); 2544 if (seg->s_as->a_vbits) 2545 hat_setstat(seg->s_as, adr, PAGESIZE, P_REF); 2546 } 2547 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT, 2548 "segvn_fault:pp %p vp %p offset %llx", pp, vp, offset); 2549 page_unlock(pp); 2550 } 2551 ASSERT(svd->softlockcnt >= btop(len)); 2552 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -btop(len))) { 2553 /* 2554 * All SOFTLOCKS are gone. Wakeup any waiting 2555 * unmappers so they can try again to unmap. 2556 * Check for waiters first without the mutex 2557 * held so we don't always grab the mutex on 2558 * softunlocks.
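 *
 * This is the usual check / lock / re-check idiom, sketched here for
 * reference (the code below is the real thing):
 *
 *	if (AS_ISUNMAPWAIT(as)) {		cheap unlocked probe
 *		mutex_enter(&as->a_contents);
 *		if (AS_ISUNMAPWAIT(as)) {	re-test under the lock
 *			AS_CLRUNMAPWAIT(as);
 *			cv_broadcast(&as->a_cv);
 *		}
 *		mutex_exit(&as->a_contents);
 *	}
 *
 * The unlocked probe only avoids taking a_contents in the common
 * no-waiter case; correctness comes from the re-test under the lock.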
2559 */ 2560 if (AS_ISUNMAPWAIT(seg->s_as)) { 2561 mutex_enter(&seg->s_as->a_contents); 2562 if (AS_ISUNMAPWAIT(seg->s_as)) { 2563 AS_CLRUNMAPWAIT(seg->s_as); 2564 cv_broadcast(&seg->s_as->a_cv); 2565 } 2566 mutex_exit(&seg->s_as->a_contents); 2567 } 2568 } 2569 } 2570 2571 #define PAGE_HANDLED ((page_t *)-1) 2572 2573 /* 2574 * Release all the pages in the NULL terminated ppp list 2575 * which haven't already been converted to PAGE_HANDLED. 2576 */ 2577 static void 2578 segvn_pagelist_rele(page_t **ppp) 2579 { 2580 for (; *ppp != NULL; ppp++) { 2581 if (*ppp != PAGE_HANDLED) 2582 page_unlock(*ppp); 2583 } 2584 } 2585 2586 static int stealcow = 1; 2587 2588 /* 2589 * Workaround for viking chip bug. See bug id 1220902. 2590 * To fix this down in pagefault() would require importing so 2591 * much as and segvn code as to be unmaintainable. 2592 */ 2593 int enable_mbit_wa = 0; 2594 2595 /* 2596 * Handles all the dirty work of getting the right 2597 * anonymous pages and loading up the translations. 2598 * This routine is called only from segvn_fault() 2599 * when looping over the range of addresses requested. 2600 * 2601 * The basic algorithm here is: 2602 * If this is an anon_zero case 2603 * Call anon_zero to allocate page 2604 * Load up translation 2605 * Return 2606 * endif 2607 * If this is an anon page 2608 * Use anon_getpage to get the page 2609 * else 2610 * Find page in pl[] list passed in 2611 * endif 2612 * If not a cow 2613 * Load up the translation to the page 2614 * return 2615 * endif 2616 * Call anon_private to handle cow 2617 * Load up (writable) translation to new page 2618 */ 2619 static faultcode_t 2620 segvn_faultpage( 2621 struct hat *hat, /* the hat to use for mapping */ 2622 struct seg *seg, /* seg_vn of interest */ 2623 caddr_t addr, /* address in as */ 2624 u_offset_t off, /* offset in vp */ 2625 struct vpage *vpage, /* pointer to vpage for vp, off */ 2626 page_t *pl[], /* object source page pointer */ 2627 uint_t vpprot, /* access allowed to object pages */ 2628 enum fault_type type, /* type of fault */ 2629 enum seg_rw rw, /* type of access at fault */ 2630 int brkcow) /* we may need to break cow */ 2631 { 2632 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2633 page_t *pp, **ppp; 2634 uint_t pageflags = 0; 2635 page_t *anon_pl[1 + 1]; 2636 page_t *opp = NULL; /* original page */ 2637 uint_t prot; 2638 int err; 2639 int cow; 2640 int claim; 2641 int steal = 0; 2642 ulong_t anon_index; 2643 struct anon *ap, *oldap; 2644 struct anon_map *amp; 2645 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD; 2646 int anon_lock = 0; 2647 anon_sync_obj_t cookie; 2648 2649 if (svd->flags & MAP_TEXT) { 2650 hat_flag |= HAT_LOAD_TEXT; 2651 } 2652 2653 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock)); 2654 ASSERT(seg->s_szc == 0); 2655 ASSERT(svd->tr_state != SEGVN_TR_INIT); 2656 2657 /* 2658 * Initialize protection value for this page. 2659 * If we have per page protection values check it now. 
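 *
 * The per-page check below reduces the fault type to a single required
 * permission (S_OTHER accepts any of the three bits):
 *
 *	S_READ  -> PROT_READ
 *	S_WRITE -> PROT_WRITE
 *	S_EXEC  -> PROT_EXEC
 *
 * e.g. a write fault on a vpage whose VPP_PROT() lacks PROT_WRITE
 * returns FC_PROT before any page is looked up or allocated.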
2660 */ 2661 if (svd->pageprot) { 2662 uint_t protchk; 2663 2664 switch (rw) { 2665 case S_READ: 2666 protchk = PROT_READ; 2667 break; 2668 case S_WRITE: 2669 protchk = PROT_WRITE; 2670 break; 2671 case S_EXEC: 2672 protchk = PROT_EXEC; 2673 break; 2674 case S_OTHER: 2675 default: 2676 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 2677 break; 2678 } 2679 2680 prot = VPP_PROT(vpage); 2681 if ((prot & protchk) == 0) 2682 return (FC_PROT); /* illegal access type */ 2683 } else { 2684 prot = svd->prot; 2685 } 2686 2687 if (type == F_SOFTLOCK) { 2688 atomic_add_long((ulong_t *)&svd->softlockcnt, 1); 2689 } 2690 2691 /* 2692 * Always acquire the anon array lock to prevent 2 threads from 2693 * allocating separate anon slots for the same "addr". 2694 */ 2695 2696 if ((amp = svd->amp) != NULL) { 2697 ASSERT(RW_READ_HELD(&->a_rwlock)); 2698 anon_index = svd->anon_index + seg_page(seg, addr); 2699 anon_array_enter(amp, anon_index, &cookie); 2700 anon_lock = 1; 2701 } 2702 2703 if (svd->vp == NULL && amp != NULL) { 2704 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) { 2705 /* 2706 * Allocate a (normally) writable anonymous page of 2707 * zeroes. If no advance reservations, reserve now. 2708 */ 2709 if (svd->flags & MAP_NORESERVE) { 2710 if (anon_resv_zone(ptob(1), 2711 seg->s_as->a_proc->p_zone)) { 2712 atomic_add_long(&svd->swresv, ptob(1)); 2713 } else { 2714 err = ENOMEM; 2715 goto out; 2716 } 2717 } 2718 if ((pp = anon_zero(seg, addr, &ap, 2719 svd->cred)) == NULL) { 2720 err = ENOMEM; 2721 goto out; /* out of swap space */ 2722 } 2723 /* 2724 * Re-acquire the anon_map lock and 2725 * initialize the anon array entry. 2726 */ 2727 (void) anon_set_ptr(amp->ahp, anon_index, ap, 2728 ANON_SLEEP); 2729 2730 ASSERT(pp->p_szc == 0); 2731 2732 /* 2733 * Handle pages that have been marked for migration 2734 */ 2735 if (lgrp_optimizations()) 2736 page_migrate(seg, addr, &pp, 1); 2737 2738 if (enable_mbit_wa) { 2739 if (rw == S_WRITE) 2740 hat_setmod(pp); 2741 else if (!hat_ismod(pp)) 2742 prot &= ~PROT_WRITE; 2743 } 2744 /* 2745 * If AS_PAGLCK is set in a_flags (via memcntl(2) 2746 * with MC_LOCKAS, MCL_FUTURE) and this is a 2747 * MAP_NORESERVE segment, we may need to 2748 * permanently lock the page as it is being faulted 2749 * for the first time. The following text applies 2750 * only to MAP_NORESERVE segments: 2751 * 2752 * As per memcntl(2), if this segment was created 2753 * after MCL_FUTURE was applied (a "future" 2754 * segment), its pages must be locked. If this 2755 * segment existed at MCL_FUTURE application (a 2756 * "past" segment), the interface is unclear. 2757 * 2758 * We decide to lock only if vpage is present: 2759 * 2760 * - "future" segments will have a vpage array (see 2761 * as_map), and so will be locked as required 2762 * 2763 * - "past" segments may not have a vpage array, 2764 * depending on whether events (such as 2765 * mprotect) have occurred. Locking if vpage 2766 * exists will preserve legacy behavior. Not 2767 * locking if vpage is absent, will not break 2768 * the interface or legacy behavior. Note that 2769 * allocating vpage here if it's absent requires 2770 * upgrading the segvn reader lock, the cost of 2771 * which does not seem worthwhile. 2772 * 2773 * Usually testing and setting VPP_ISPPLOCK and 2774 * VPP_SETPPLOCK requires holding the segvn lock as 2775 * writer, but in this case all readers are 2776 * serializing on the anon array lock. 
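 *
 * Sketch of the sequence performed just below: the locked-memory
 * resource control is charged first and rolled back if the page level
 * lock cannot be obtained,
 *
 *	mutex_enter(&p->p_lock);
 *	if (rctl_incr_locked_mem(p, NULL, PAGESIZE, 1) == 0) {
 *		if (page_pp_lock(pp, claim, 0))
 *			VPP_SETPPLOCK(vpage);
 *		else
 *			rctl_decr_locked_mem(p, NULL, PAGESIZE, 1);
 *	}
 *	mutex_exit(&p->p_lock);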
2777 */ 2778 if (AS_ISPGLCK(seg->s_as) && vpage != NULL && 2779 (svd->flags & MAP_NORESERVE) && 2780 !VPP_ISPPLOCK(vpage)) { 2781 proc_t *p = seg->s_as->a_proc; 2782 ASSERT(svd->type == MAP_PRIVATE); 2783 mutex_enter(&p->p_lock); 2784 if (rctl_incr_locked_mem(p, NULL, PAGESIZE, 2785 1) == 0) { 2786 claim = VPP_PROT(vpage) & PROT_WRITE; 2787 if (page_pp_lock(pp, claim, 0)) { 2788 VPP_SETPPLOCK(vpage); 2789 } else { 2790 rctl_decr_locked_mem(p, NULL, 2791 PAGESIZE, 1); 2792 } 2793 } 2794 mutex_exit(&p->p_lock); 2795 } 2796 2797 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2798 hat_memload(hat, addr, pp, prot, hat_flag); 2799 2800 if (!(hat_flag & HAT_LOAD_LOCK)) 2801 page_unlock(pp); 2802 2803 anon_array_exit(&cookie); 2804 return (0); 2805 } 2806 } 2807 2808 /* 2809 * Obtain the page structure via anon_getpage() if it is 2810 * a private copy of an object (the result of a previous 2811 * copy-on-write). 2812 */ 2813 if (amp != NULL) { 2814 if ((ap = anon_get_ptr(amp->ahp, anon_index)) != NULL) { 2815 err = anon_getpage(&ap, &vpprot, anon_pl, PAGESIZE, 2816 seg, addr, rw, svd->cred); 2817 if (err) 2818 goto out; 2819 2820 if (svd->type == MAP_SHARED) { 2821 /* 2822 * If this is a shared mapping to an 2823 * anon_map, then ignore the write 2824 * permissions returned by anon_getpage(). 2825 * They apply to the private mappings 2826 * of this anon_map. 2827 */ 2828 vpprot |= PROT_WRITE; 2829 } 2830 opp = anon_pl[0]; 2831 } 2832 } 2833 2834 /* 2835 * Search the pl[] list passed in if it is from the 2836 * original object (i.e., not a private copy). 2837 */ 2838 if (opp == NULL) { 2839 /* 2840 * Find original page. We must be bringing it in 2841 * from the list in pl[]. 2842 */ 2843 for (ppp = pl; (opp = *ppp) != NULL; ppp++) { 2844 if (opp == PAGE_HANDLED) 2845 continue; 2846 ASSERT(opp->p_vnode == svd->vp); /* XXX */ 2847 if (opp->p_offset == off) 2848 break; 2849 } 2850 if (opp == NULL) { 2851 panic("segvn_faultpage not found"); 2852 /*NOTREACHED*/ 2853 } 2854 *ppp = PAGE_HANDLED; 2855 2856 } 2857 2858 ASSERT(PAGE_LOCKED(opp)); 2859 2860 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT, 2861 "segvn_fault:pp %p vp %p offset %llx", opp, NULL, 0); 2862 2863 /* 2864 * The fault is treated as a copy-on-write fault if a 2865 * write occurs on a private segment and the object 2866 * page (i.e., mapping) is write protected. We assume 2867 * that fatal protection checks have already been made. 2868 */ 2869 2870 if (brkcow) { 2871 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2872 cow = !(vpprot & PROT_WRITE); 2873 } else if (svd->tr_state == SEGVN_TR_ON) { 2874 /* 2875 * If we are doing text replication COW on first touch. 2876 */ 2877 ASSERT(amp != NULL); 2878 ASSERT(svd->vp != NULL); 2879 ASSERT(rw != S_WRITE); 2880 cow = (ap == NULL); 2881 } else { 2882 cow = 0; 2883 } 2884 2885 /* 2886 * If not a copy-on-write case load the translation 2887 * and return. 
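 *
 * Recap of how "cow" was chosen above:
 *
 *	brkcow                      cow = !(vpprot & PROT_WRITE)
 *	tr_state == SEGVN_TR_ON     cow = (ap == NULL), i.e. first touch
 *	otherwise                   cow = 0
 *
 * so the common read fault on a page we already map correctly takes
 * the cow == 0 path here.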
2888 */ 2889 if (cow == 0) { 2890 2891 /* 2892 * Handle pages that have been marked for migration 2893 */ 2894 if (lgrp_optimizations()) 2895 page_migrate(seg, addr, &opp, 1); 2896 2897 if (IS_VMODSORT(opp->p_vnode) || enable_mbit_wa) { 2898 if (rw == S_WRITE) 2899 hat_setmod(opp); 2900 else if (rw != S_OTHER && !hat_ismod(opp)) 2901 prot &= ~PROT_WRITE; 2902 } 2903 2904 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE || 2905 (!svd->pageprot && svd->prot == (prot & vpprot))); 2906 ASSERT(amp == NULL || 2907 svd->rcookie == HAT_INVALID_REGION_COOKIE); 2908 hat_memload_region(hat, addr, opp, prot & vpprot, hat_flag, 2909 svd->rcookie); 2910 2911 if (!(hat_flag & HAT_LOAD_LOCK)) 2912 page_unlock(opp); 2913 2914 if (anon_lock) { 2915 anon_array_exit(&cookie); 2916 } 2917 return (0); 2918 } 2919 2920 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2921 2922 hat_setref(opp); 2923 2924 ASSERT(amp != NULL && anon_lock); 2925 2926 /* 2927 * Steal the page only if it isn't a private page 2928 * since stealing a private page is not worth the effort. 2929 */ 2930 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) 2931 steal = 1; 2932 2933 /* 2934 * Steal the original page if the following conditions are true: 2935 * 2936 * We are low on memory, the page is not private, page is not large, 2937 * not shared, not modified, not `locked' or if we have it `locked' 2938 * (i.e., p_cowcnt == 1 and p_lckcnt == 0, which also implies 2939 * that the page is not shared) and if it doesn't have any 2940 * translations. page_struct_lock isn't needed to look at p_cowcnt 2941 * and p_lckcnt because we first get exclusive lock on page. 2942 */ 2943 (void) hat_pagesync(opp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD); 2944 2945 if (stealcow && freemem < minfree && steal && opp->p_szc == 0 && 2946 page_tryupgrade(opp) && !hat_ismod(opp) && 2947 ((opp->p_lckcnt == 0 && opp->p_cowcnt == 0) || 2948 (opp->p_lckcnt == 0 && opp->p_cowcnt == 1 && 2949 vpage != NULL && VPP_ISPPLOCK(vpage)))) { 2950 /* 2951 * Check if this page has other translations 2952 * after unloading our translation. 2953 */ 2954 if (hat_page_is_mapped(opp)) { 2955 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2956 hat_unload(seg->s_as->a_hat, addr, PAGESIZE, 2957 HAT_UNLOAD); 2958 } 2959 2960 /* 2961 * hat_unload() might sync back someone else's recent 2962 * modification, so check again. 2963 */ 2964 if (!hat_ismod(opp) && !hat_page_is_mapped(opp)) 2965 pageflags |= STEAL_PAGE; 2966 } 2967 2968 /* 2969 * If we have a vpage pointer, see if it indicates that we have 2970 * ``locked'' the page we map -- if so, tell anon_private to 2971 * transfer the locking resource to the new page. 2972 * 2973 * See Statement at the beginning of segvn_lockop regarding 2974 * the way lockcnts/cowcnts are handled during COW. 2975 * 2976 */ 2977 if (vpage != NULL && VPP_ISPPLOCK(vpage)) 2978 pageflags |= LOCK_PAGE; 2979 2980 /* 2981 * Allocate a private page and perform the copy. 2982 * For MAP_NORESERVE reserve swap space now, unless this 2983 * is a cow fault on an existing anon page in which case 2984 * MAP_NORESERVE will have made advance reservations. 
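 *
 * (Illustrative note: the reservation below is for a single page,
 *
 *	anon_resv_zone(ptob(1), seg->s_as->a_proc->p_zone);
 *	atomic_add_long(&svd->swresv, ptob(1));
 *
 * and is attempted only when ap == NULL, i.e. this is the first
 * private copy of the page; on failure the original page is unlocked
 * and the fault fails with ENOMEM before anything is copied.)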
2985 */ 2986 if ((svd->flags & MAP_NORESERVE) && (ap == NULL)) { 2987 if (anon_resv_zone(ptob(1), seg->s_as->a_proc->p_zone)) { 2988 atomic_add_long(&svd->swresv, ptob(1)); 2989 } else { 2990 page_unlock(opp); 2991 err = ENOMEM; 2992 goto out; 2993 } 2994 } 2995 oldap = ap; 2996 pp = anon_private(&ap, seg, addr, prot, opp, pageflags, svd->cred); 2997 if (pp == NULL) { 2998 err = ENOMEM; /* out of swap space */ 2999 goto out; 3000 } 3001 3002 /* 3003 * If we copied away from an anonymous page, then 3004 * we are one step closer to freeing up an anon slot. 3005 * 3006 * NOTE: The original anon slot must be released while 3007 * holding the "anon_map" lock. This is necessary to prevent 3008 * other threads from obtaining a pointer to the anon slot 3009 * which may be freed if its "refcnt" is 1. 3010 */ 3011 if (oldap != NULL) 3012 anon_decref(oldap); 3013 3014 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP); 3015 3016 /* 3017 * Handle pages that have been marked for migration 3018 */ 3019 if (lgrp_optimizations()) 3020 page_migrate(seg, addr, &pp, 1); 3021 3022 ASSERT(pp->p_szc == 0); 3023 3024 ASSERT(!IS_VMODSORT(pp->p_vnode)); 3025 if (enable_mbit_wa) { 3026 if (rw == S_WRITE) 3027 hat_setmod(pp); 3028 else if (!hat_ismod(pp)) 3029 prot &= ~PROT_WRITE; 3030 } 3031 3032 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 3033 hat_memload(hat, addr, pp, prot, hat_flag); 3034 3035 if (!(hat_flag & HAT_LOAD_LOCK)) 3036 page_unlock(pp); 3037 3038 ASSERT(anon_lock); 3039 anon_array_exit(&cookie); 3040 return (0); 3041 out: 3042 if (anon_lock) 3043 anon_array_exit(&cookie); 3044 3045 if (type == F_SOFTLOCK) { 3046 atomic_add_long((ulong_t *)&svd->softlockcnt, -1); 3047 } 3048 return (FC_MAKE_ERR(err)); 3049 } 3050 3051 /* 3052 * relocate a bunch of smaller targ pages into one large repl page. all targ 3053 * pages must be complete pages smaller than replacement pages. 3054 * it's assumed that no page's szc can change since they are all PAGESIZE or 3055 * complete large pages locked SHARED. 
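 *
 * Illustrative geometry (the page sizes are only an example): with an
 * 8K base page and a 64K replacement page, page_get_pagecnt(repl_szc)
 * is 8, so targ[] names the 8 constituent pages (single pages or
 * complete smaller large pages) that all end up relocated into the one
 * replacement page and handed back to the caller via page_downgrade().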
3056 */ 3057 static void 3058 segvn_relocate_pages(page_t **targ, page_t *replacement) 3059 { 3060 page_t *pp; 3061 pgcnt_t repl_npgs, curnpgs; 3062 pgcnt_t i; 3063 uint_t repl_szc = replacement->p_szc; 3064 page_t *first_repl = replacement; 3065 page_t *repl; 3066 spgcnt_t npgs; 3067 3068 VM_STAT_ADD(segvnvmstats.relocatepages[0]); 3069 3070 ASSERT(repl_szc != 0); 3071 npgs = repl_npgs = page_get_pagecnt(repl_szc); 3072 3073 i = 0; 3074 while (repl_npgs) { 3075 spgcnt_t nreloc; 3076 int err; 3077 ASSERT(replacement != NULL); 3078 pp = targ[i]; 3079 ASSERT(pp->p_szc < repl_szc); 3080 ASSERT(PAGE_EXCL(pp)); 3081 ASSERT(!PP_ISFREE(pp)); 3082 curnpgs = page_get_pagecnt(pp->p_szc); 3083 if (curnpgs == 1) { 3084 VM_STAT_ADD(segvnvmstats.relocatepages[1]); 3085 repl = replacement; 3086 page_sub(&replacement, repl); 3087 ASSERT(PAGE_EXCL(repl)); 3088 ASSERT(!PP_ISFREE(repl)); 3089 ASSERT(repl->p_szc == repl_szc); 3090 } else { 3091 page_t *repl_savepp; 3092 int j; 3093 VM_STAT_ADD(segvnvmstats.relocatepages[2]); 3094 repl_savepp = replacement; 3095 for (j = 0; j < curnpgs; j++) { 3096 repl = replacement; 3097 page_sub(&replacement, repl); 3098 ASSERT(PAGE_EXCL(repl)); 3099 ASSERT(!PP_ISFREE(repl)); 3100 ASSERT(repl->p_szc == repl_szc); 3101 ASSERT(page_pptonum(targ[i + j]) == 3102 page_pptonum(targ[i]) + j); 3103 } 3104 repl = repl_savepp; 3105 ASSERT(IS_P2ALIGNED(page_pptonum(repl), curnpgs)); 3106 } 3107 err = page_relocate(&pp, &repl, 0, 1, &nreloc, NULL); 3108 if (err || nreloc != curnpgs) { 3109 panic("segvn_relocate_pages: " 3110 "page_relocate failed err=%d curnpgs=%ld " 3111 "nreloc=%ld", err, curnpgs, nreloc); 3112 } 3113 ASSERT(curnpgs <= repl_npgs); 3114 repl_npgs -= curnpgs; 3115 i += curnpgs; 3116 } 3117 ASSERT(replacement == NULL); 3118 3119 repl = first_repl; 3120 repl_npgs = npgs; 3121 for (i = 0; i < repl_npgs; i++) { 3122 ASSERT(PAGE_EXCL(repl)); 3123 ASSERT(!PP_ISFREE(repl)); 3124 targ[i] = repl; 3125 page_downgrade(targ[i]); 3126 repl++; 3127 } 3128 } 3129 3130 /* 3131 * Check if all pages in ppa array are complete smaller than szc pages and 3132 * their roots will still be aligned relative to their current size if the 3133 * entire ppa array is relocated into one szc page. If these conditions are 3134 * not met return 0. 3135 * 3136 * If all pages are properly aligned attempt to upgrade their locks 3137 * to exclusive mode. If it fails set *upgrdfail to 1 and return 0. 3138 * upgrdfail was set to 0 by caller. 3139 * 3140 * Return 1 if all pages are aligned and locked exclusively. 3141 * 3142 * If all pages in ppa array happen to be physically contiguous to make one 3143 * szc page and all exclusive locks are successfully obtained promote the page 3144 * size to szc and set *pszc to szc. Return 1 with pages locked shared. 
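 *
 * Summary of the possible outcomes, restating the rules above:
 *
 *	return 0, *upgrdfail == 0   misaligned, or a p_szc changed
 *	return 0, *upgrdfail == 1   aligned but a shared-to-exclusive
 *	                            upgrade failed; *pszc reports that
 *	                            page's size code
 *	return 1, pages SE_EXCL     aligned and all upgrades succeeded
 *	return 1, pages SE_SHARED   the range was already one physically
 *	                            contiguous szc page; *pszc set to szc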
3145 */ 3146 static int 3147 segvn_full_szcpages(page_t **ppa, uint_t szc, int *upgrdfail, uint_t *pszc) 3148 { 3149 page_t *pp; 3150 pfn_t pfn; 3151 pgcnt_t totnpgs = page_get_pagecnt(szc); 3152 pfn_t first_pfn; 3153 int contig = 1; 3154 pgcnt_t i; 3155 pgcnt_t j; 3156 uint_t curszc; 3157 pgcnt_t curnpgs; 3158 int root = 0; 3159 3160 ASSERT(szc > 0); 3161 3162 VM_STAT_ADD(segvnvmstats.fullszcpages[0]); 3163 3164 for (i = 0; i < totnpgs; i++) { 3165 pp = ppa[i]; 3166 ASSERT(PAGE_SHARED(pp)); 3167 ASSERT(!PP_ISFREE(pp)); 3168 pfn = page_pptonum(pp); 3169 if (i == 0) { 3170 if (!IS_P2ALIGNED(pfn, totnpgs)) { 3171 contig = 0; 3172 } else { 3173 first_pfn = pfn; 3174 } 3175 } else if (contig && pfn != first_pfn + i) { 3176 contig = 0; 3177 } 3178 if (pp->p_szc == 0) { 3179 if (root) { 3180 VM_STAT_ADD(segvnvmstats.fullszcpages[1]); 3181 return (0); 3182 } 3183 } else if (!root) { 3184 if ((curszc = pp->p_szc) >= szc) { 3185 VM_STAT_ADD(segvnvmstats.fullszcpages[2]); 3186 return (0); 3187 } 3188 if (curszc == 0) { 3189 /* 3190 * p_szc changed means we don't have all pages 3191 * locked. return failure. 3192 */ 3193 VM_STAT_ADD(segvnvmstats.fullszcpages[3]); 3194 return (0); 3195 } 3196 curnpgs = page_get_pagecnt(curszc); 3197 if (!IS_P2ALIGNED(pfn, curnpgs) || 3198 !IS_P2ALIGNED(i, curnpgs)) { 3199 VM_STAT_ADD(segvnvmstats.fullszcpages[4]); 3200 return (0); 3201 } 3202 root = 1; 3203 } else { 3204 ASSERT(i > 0); 3205 VM_STAT_ADD(segvnvmstats.fullszcpages[5]); 3206 if (pp->p_szc != curszc) { 3207 VM_STAT_ADD(segvnvmstats.fullszcpages[6]); 3208 return (0); 3209 } 3210 if (pfn - 1 != page_pptonum(ppa[i - 1])) { 3211 panic("segvn_full_szcpages: " 3212 "large page not physically contiguous"); 3213 } 3214 if (P2PHASE(pfn, curnpgs) == curnpgs - 1) { 3215 root = 0; 3216 } 3217 } 3218 } 3219 3220 for (i = 0; i < totnpgs; i++) { 3221 ASSERT(ppa[i]->p_szc < szc); 3222 if (!page_tryupgrade(ppa[i])) { 3223 for (j = 0; j < i; j++) { 3224 page_downgrade(ppa[j]); 3225 } 3226 *pszc = ppa[i]->p_szc; 3227 *upgrdfail = 1; 3228 VM_STAT_ADD(segvnvmstats.fullszcpages[7]); 3229 return (0); 3230 } 3231 } 3232 3233 /* 3234 * When a page is put a free cachelist its szc is set to 0. if file 3235 * system reclaimed pages from cachelist targ pages will be physically 3236 * contiguous with 0 p_szc. in this case just upgrade szc of targ 3237 * pages without any relocations. 3238 * To avoid any hat issues with previous small mappings 3239 * hat_pageunload() the target pages first. 3240 */ 3241 if (contig) { 3242 VM_STAT_ADD(segvnvmstats.fullszcpages[8]); 3243 for (i = 0; i < totnpgs; i++) { 3244 (void) hat_pageunload(ppa[i], HAT_FORCE_PGUNLOAD); 3245 } 3246 for (i = 0; i < totnpgs; i++) { 3247 ppa[i]->p_szc = szc; 3248 } 3249 for (i = 0; i < totnpgs; i++) { 3250 ASSERT(PAGE_EXCL(ppa[i])); 3251 page_downgrade(ppa[i]); 3252 } 3253 if (pszc != NULL) { 3254 *pszc = szc; 3255 } 3256 } 3257 VM_STAT_ADD(segvnvmstats.fullszcpages[9]); 3258 return (1); 3259 } 3260 3261 /* 3262 * Create physically contiguous pages for [vp, off] - [vp, off + 3263 * page_size(szc)) range and for private segment return them in ppa array. 3264 * Pages are created either via IO or relocations. 3265 * 3266 * Return 1 on success and 0 on failure. 3267 * 3268 * If physically contiguous pages already exist for this range return 1 without 3269 * filling ppa array. Caller initializes ppa[0] as NULL to detect that ppa 3270 * array wasn't filled. In this case caller fills ppa array via VOP_GETPAGE(). 
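 *
 * Caller-side sketch (this mirrors the use in segvn_fault_vnodepages()):
 *
 *	ppa[0] = NULL;
 *	physcontig = segvn_fill_vp_pages(svd, vp, off, szc, ppa,
 *	    &pplist, &pszc, &downsize);
 *	if (physcontig && ppa[0] == NULL)
 *		physcontig = 0;		contiguous pages already existed;
 *					let VOP_GETPAGE() find them
 *
 * i.e. a return of 1 only means "large page possible here"; ppa[] is
 * filled in only when this routine itself created or relocated the
 * constituent pages.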
3271 */ 3272 3273 static int 3274 segvn_fill_vp_pages(struct segvn_data *svd, vnode_t *vp, u_offset_t off, 3275 uint_t szc, page_t **ppa, page_t **ppplist, uint_t *ret_pszc, 3276 int *downsize) 3277 3278 { 3279 page_t *pplist = *ppplist; 3280 size_t pgsz = page_get_pagesize(szc); 3281 pgcnt_t pages = btop(pgsz); 3282 ulong_t start_off = off; 3283 u_offset_t eoff = off + pgsz; 3284 spgcnt_t nreloc; 3285 u_offset_t io_off = off; 3286 size_t io_len; 3287 page_t *io_pplist = NULL; 3288 page_t *done_pplist = NULL; 3289 pgcnt_t pgidx = 0; 3290 page_t *pp; 3291 page_t *newpp; 3292 page_t *targpp; 3293 int io_err = 0; 3294 int i; 3295 pfn_t pfn; 3296 ulong_t ppages; 3297 page_t *targ_pplist = NULL; 3298 page_t *repl_pplist = NULL; 3299 page_t *tmp_pplist; 3300 int nios = 0; 3301 uint_t pszc; 3302 struct vattr va; 3303 3304 VM_STAT_ADD(segvnvmstats.fill_vp_pages[0]); 3305 3306 ASSERT(szc != 0); 3307 ASSERT(pplist->p_szc == szc); 3308 3309 /* 3310 * downsize will be set to 1 only if we fail to lock pages. this will 3311 * allow subsequent faults to try to relocate the page again. If we 3312 * fail due to misalignment don't downsize and let the caller map the 3313 * whole region with small mappings to avoid more faults into the area 3314 * where we can't get large pages anyway. 3315 */ 3316 *downsize = 0; 3317 3318 while (off < eoff) { 3319 newpp = pplist; 3320 ASSERT(newpp != NULL); 3321 ASSERT(PAGE_EXCL(newpp)); 3322 ASSERT(!PP_ISFREE(newpp)); 3323 /* 3324 * we pass NULL for nrelocp to page_lookup_create() 3325 * so that it doesn't relocate. We relocate here 3326 * later only after we make sure we can lock all 3327 * pages in the range we handle and they are all 3328 * aligned. 3329 */ 3330 pp = page_lookup_create(vp, off, SE_SHARED, newpp, NULL, 0); 3331 ASSERT(pp != NULL); 3332 ASSERT(!PP_ISFREE(pp)); 3333 ASSERT(pp->p_vnode == vp); 3334 ASSERT(pp->p_offset == off); 3335 if (pp == newpp) { 3336 VM_STAT_ADD(segvnvmstats.fill_vp_pages[1]); 3337 page_sub(&pplist, pp); 3338 ASSERT(PAGE_EXCL(pp)); 3339 ASSERT(page_iolock_assert(pp)); 3340 page_list_concat(&io_pplist, &pp); 3341 off += PAGESIZE; 3342 continue; 3343 } 3344 VM_STAT_ADD(segvnvmstats.fill_vp_pages[2]); 3345 pfn = page_pptonum(pp); 3346 pszc = pp->p_szc; 3347 if (pszc >= szc && targ_pplist == NULL && io_pplist == NULL && 3348 IS_P2ALIGNED(pfn, pages)) { 3349 ASSERT(repl_pplist == NULL); 3350 ASSERT(done_pplist == NULL); 3351 ASSERT(pplist == *ppplist); 3352 page_unlock(pp); 3353 page_free_replacement_page(pplist); 3354 page_create_putback(pages); 3355 *ppplist = NULL; 3356 VM_STAT_ADD(segvnvmstats.fill_vp_pages[3]); 3357 return (1); 3358 } 3359 if (pszc >= szc) { 3360 page_unlock(pp); 3361 segvn_faultvnmpss_align_err1++; 3362 goto out; 3363 } 3364 ppages = page_get_pagecnt(pszc); 3365 if (!IS_P2ALIGNED(pfn, ppages)) { 3366 ASSERT(pszc > 0); 3367 /* 3368 * sizing down to pszc won't help. 3369 */ 3370 page_unlock(pp); 3371 segvn_faultvnmpss_align_err2++; 3372 goto out; 3373 } 3374 pfn = page_pptonum(newpp); 3375 if (!IS_P2ALIGNED(pfn, ppages)) { 3376 ASSERT(pszc > 0); 3377 /* 3378 * sizing down to pszc won't help. 
3379 */ 3380 page_unlock(pp); 3381 segvn_faultvnmpss_align_err3++; 3382 goto out; 3383 } 3384 if (!PAGE_EXCL(pp)) { 3385 VM_STAT_ADD(segvnvmstats.fill_vp_pages[4]); 3386 page_unlock(pp); 3387 *downsize = 1; 3388 *ret_pszc = pp->p_szc; 3389 goto out; 3390 } 3391 targpp = pp; 3392 if (io_pplist != NULL) { 3393 VM_STAT_ADD(segvnvmstats.fill_vp_pages[5]); 3394 io_len = off - io_off; 3395 /* 3396 * Some file systems like NFS don't check EOF 3397 * conditions in VOP_PAGEIO(). Check it here 3398 * now that pages are locked SE_EXCL. Any file 3399 * truncation will wait until the pages are 3400 * unlocked so no need to worry that file will 3401 * be truncated after we check its size here. 3402 * XXX fix NFS to remove this check. 3403 */ 3404 va.va_mask = AT_SIZE; 3405 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL)) { 3406 VM_STAT_ADD(segvnvmstats.fill_vp_pages[6]); 3407 page_unlock(targpp); 3408 goto out; 3409 } 3410 if (btopr(va.va_size) < btopr(io_off + io_len)) { 3411 VM_STAT_ADD(segvnvmstats.fill_vp_pages[7]); 3412 *downsize = 1; 3413 *ret_pszc = 0; 3414 page_unlock(targpp); 3415 goto out; 3416 } 3417 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len, 3418 B_READ, svd->cred, NULL); 3419 if (io_err) { 3420 VM_STAT_ADD(segvnvmstats.fill_vp_pages[8]); 3421 page_unlock(targpp); 3422 if (io_err == EDEADLK) { 3423 segvn_vmpss_pageio_deadlk_err++; 3424 } 3425 goto out; 3426 } 3427 nios++; 3428 VM_STAT_ADD(segvnvmstats.fill_vp_pages[9]); 3429 while (io_pplist != NULL) { 3430 pp = io_pplist; 3431 page_sub(&io_pplist, pp); 3432 ASSERT(page_iolock_assert(pp)); 3433 page_io_unlock(pp); 3434 pgidx = (pp->p_offset - start_off) >> 3435 PAGESHIFT; 3436 ASSERT(pgidx < pages); 3437 ppa[pgidx] = pp; 3438 page_list_concat(&done_pplist, &pp); 3439 } 3440 } 3441 pp = targpp; 3442 ASSERT(PAGE_EXCL(pp)); 3443 ASSERT(pp->p_szc <= pszc); 3444 if (pszc != 0 && !group_page_trylock(pp, SE_EXCL)) { 3445 VM_STAT_ADD(segvnvmstats.fill_vp_pages[10]); 3446 page_unlock(pp); 3447 *downsize = 1; 3448 *ret_pszc = pp->p_szc; 3449 goto out; 3450 } 3451 VM_STAT_ADD(segvnvmstats.fill_vp_pages[11]); 3452 /* 3453 * page szc chould have changed before the entire group was 3454 * locked. reread page szc. 
3455 */ 3456 pszc = pp->p_szc; 3457 ppages = page_get_pagecnt(pszc); 3458 3459 /* link just the roots */ 3460 page_list_concat(&targ_pplist, &pp); 3461 page_sub(&pplist, newpp); 3462 page_list_concat(&repl_pplist, &newpp); 3463 off += PAGESIZE; 3464 while (--ppages != 0) { 3465 newpp = pplist; 3466 page_sub(&pplist, newpp); 3467 off += PAGESIZE; 3468 } 3469 io_off = off; 3470 } 3471 if (io_pplist != NULL) { 3472 VM_STAT_ADD(segvnvmstats.fill_vp_pages[12]); 3473 io_len = eoff - io_off; 3474 va.va_mask = AT_SIZE; 3475 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL) != 0) { 3476 VM_STAT_ADD(segvnvmstats.fill_vp_pages[13]); 3477 goto out; 3478 } 3479 if (btopr(va.va_size) < btopr(io_off + io_len)) { 3480 VM_STAT_ADD(segvnvmstats.fill_vp_pages[14]); 3481 *downsize = 1; 3482 *ret_pszc = 0; 3483 goto out; 3484 } 3485 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len, 3486 B_READ, svd->cred, NULL); 3487 if (io_err) { 3488 VM_STAT_ADD(segvnvmstats.fill_vp_pages[15]); 3489 if (io_err == EDEADLK) { 3490 segvn_vmpss_pageio_deadlk_err++; 3491 } 3492 goto out; 3493 } 3494 nios++; 3495 while (io_pplist != NULL) { 3496 pp = io_pplist; 3497 page_sub(&io_pplist, pp); 3498 ASSERT(page_iolock_assert(pp)); 3499 page_io_unlock(pp); 3500 pgidx = (pp->p_offset - start_off) >> PAGESHIFT; 3501 ASSERT(pgidx < pages); 3502 ppa[pgidx] = pp; 3503 } 3504 } 3505 /* 3506 * we're now bound to succeed or panic. 3507 * remove pages from done_pplist. it's not needed anymore. 3508 */ 3509 while (done_pplist != NULL) { 3510 pp = done_pplist; 3511 page_sub(&done_pplist, pp); 3512 } 3513 VM_STAT_ADD(segvnvmstats.fill_vp_pages[16]); 3514 ASSERT(pplist == NULL); 3515 *ppplist = NULL; 3516 while (targ_pplist != NULL) { 3517 int ret; 3518 VM_STAT_ADD(segvnvmstats.fill_vp_pages[17]); 3519 ASSERT(repl_pplist); 3520 pp = targ_pplist; 3521 page_sub(&targ_pplist, pp); 3522 pgidx = (pp->p_offset - start_off) >> PAGESHIFT; 3523 newpp = repl_pplist; 3524 page_sub(&repl_pplist, newpp); 3525 #ifdef DEBUG 3526 pfn = page_pptonum(pp); 3527 pszc = pp->p_szc; 3528 ppages = page_get_pagecnt(pszc); 3529 ASSERT(IS_P2ALIGNED(pfn, ppages)); 3530 pfn = page_pptonum(newpp); 3531 ASSERT(IS_P2ALIGNED(pfn, ppages)); 3532 ASSERT(P2PHASE(pfn, pages) == pgidx); 3533 #endif 3534 nreloc = 0; 3535 ret = page_relocate(&pp, &newpp, 0, 1, &nreloc, NULL); 3536 if (ret != 0 || nreloc == 0) { 3537 panic("segvn_fill_vp_pages: " 3538 "page_relocate failed"); 3539 } 3540 pp = newpp; 3541 while (nreloc-- != 0) { 3542 ASSERT(PAGE_EXCL(pp)); 3543 ASSERT(pp->p_vnode == vp); 3544 ASSERT(pgidx == 3545 ((pp->p_offset - start_off) >> PAGESHIFT)); 3546 ppa[pgidx++] = pp; 3547 pp++; 3548 } 3549 } 3550 3551 if (svd->type == MAP_PRIVATE) { 3552 VM_STAT_ADD(segvnvmstats.fill_vp_pages[18]); 3553 for (i = 0; i < pages; i++) { 3554 ASSERT(ppa[i] != NULL); 3555 ASSERT(PAGE_EXCL(ppa[i])); 3556 ASSERT(ppa[i]->p_vnode == vp); 3557 ASSERT(ppa[i]->p_offset == 3558 start_off + (i << PAGESHIFT)); 3559 page_downgrade(ppa[i]); 3560 } 3561 ppa[pages] = NULL; 3562 } else { 3563 VM_STAT_ADD(segvnvmstats.fill_vp_pages[19]); 3564 /* 3565 * the caller will still call VOP_GETPAGE() for shared segments 3566 * to check FS write permissions. For private segments we map 3567 * file read only anyway. so no VOP_GETPAGE is needed. 
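 *
 * (Summary of the two success exits below: MAP_PRIVATE keeps the pages
 * SE_SHARED and NULL-terminates ppa[] for immediate use, while the
 * shared case drops the page locks and leaves ppa[0] == NULL so the
 * caller's VOP_GETPAGE() pass re-finds the now physically contiguous
 * pages and performs the filesystem permission check.)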
3568 */ 3569 for (i = 0; i < pages; i++) { 3570 ASSERT(ppa[i] != NULL); 3571 ASSERT(PAGE_EXCL(ppa[i])); 3572 ASSERT(ppa[i]->p_vnode == vp); 3573 ASSERT(ppa[i]->p_offset == 3574 start_off + (i << PAGESHIFT)); 3575 page_unlock(ppa[i]); 3576 } 3577 ppa[0] = NULL; 3578 } 3579 3580 return (1); 3581 out: 3582 /* 3583 * Do the cleanup. Unlock target pages we didn't relocate. They are 3584 * linked on targ_pplist by root pages. reassemble unused replacement 3585 * and io pages back to pplist. 3586 */ 3587 if (io_pplist != NULL) { 3588 VM_STAT_ADD(segvnvmstats.fill_vp_pages[20]); 3589 pp = io_pplist; 3590 do { 3591 ASSERT(pp->p_vnode == vp); 3592 ASSERT(pp->p_offset == io_off); 3593 ASSERT(page_iolock_assert(pp)); 3594 page_io_unlock(pp); 3595 page_hashout(pp, NULL); 3596 io_off += PAGESIZE; 3597 } while ((pp = pp->p_next) != io_pplist); 3598 page_list_concat(&io_pplist, &pplist); 3599 pplist = io_pplist; 3600 } 3601 tmp_pplist = NULL; 3602 while (targ_pplist != NULL) { 3603 VM_STAT_ADD(segvnvmstats.fill_vp_pages[21]); 3604 pp = targ_pplist; 3605 ASSERT(PAGE_EXCL(pp)); 3606 page_sub(&targ_pplist, pp); 3607 3608 pszc = pp->p_szc; 3609 ppages = page_get_pagecnt(pszc); 3610 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages)); 3611 3612 if (pszc != 0) { 3613 group_page_unlock(pp); 3614 } 3615 page_unlock(pp); 3616 3617 pp = repl_pplist; 3618 ASSERT(pp != NULL); 3619 ASSERT(PAGE_EXCL(pp)); 3620 ASSERT(pp->p_szc == szc); 3621 page_sub(&repl_pplist, pp); 3622 3623 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages)); 3624 3625 /* relink replacement page */ 3626 page_list_concat(&tmp_pplist, &pp); 3627 while (--ppages != 0) { 3628 VM_STAT_ADD(segvnvmstats.fill_vp_pages[22]); 3629 pp++; 3630 ASSERT(PAGE_EXCL(pp)); 3631 ASSERT(pp->p_szc == szc); 3632 page_list_concat(&tmp_pplist, &pp); 3633 } 3634 } 3635 if (tmp_pplist != NULL) { 3636 VM_STAT_ADD(segvnvmstats.fill_vp_pages[23]); 3637 page_list_concat(&tmp_pplist, &pplist); 3638 pplist = tmp_pplist; 3639 } 3640 /* 3641 * at this point all pages are either on done_pplist or 3642 * pplist. They can't be all on done_pplist otherwise 3643 * we'd've been done. 3644 */ 3645 ASSERT(pplist != NULL); 3646 if (nios != 0) { 3647 VM_STAT_ADD(segvnvmstats.fill_vp_pages[24]); 3648 pp = pplist; 3649 do { 3650 VM_STAT_ADD(segvnvmstats.fill_vp_pages[25]); 3651 ASSERT(pp->p_szc == szc); 3652 ASSERT(PAGE_EXCL(pp)); 3653 ASSERT(pp->p_vnode != vp); 3654 pp->p_szc = 0; 3655 } while ((pp = pp->p_next) != pplist); 3656 3657 pp = done_pplist; 3658 do { 3659 VM_STAT_ADD(segvnvmstats.fill_vp_pages[26]); 3660 ASSERT(pp->p_szc == szc); 3661 ASSERT(PAGE_EXCL(pp)); 3662 ASSERT(pp->p_vnode == vp); 3663 pp->p_szc = 0; 3664 } while ((pp = pp->p_next) != done_pplist); 3665 3666 while (pplist != NULL) { 3667 VM_STAT_ADD(segvnvmstats.fill_vp_pages[27]); 3668 pp = pplist; 3669 page_sub(&pplist, pp); 3670 page_free(pp, 0); 3671 } 3672 3673 while (done_pplist != NULL) { 3674 VM_STAT_ADD(segvnvmstats.fill_vp_pages[28]); 3675 pp = done_pplist; 3676 page_sub(&done_pplist, pp); 3677 page_unlock(pp); 3678 } 3679 *ppplist = NULL; 3680 return (0); 3681 } 3682 ASSERT(pplist == *ppplist); 3683 if (io_err) { 3684 VM_STAT_ADD(segvnvmstats.fill_vp_pages[29]); 3685 /* 3686 * don't downsize on io error. 3687 * see if vop_getpage succeeds. 3688 * pplist may still be used in this case 3689 * for relocations. 
3690 */ 3691 return (0); 3692 } 3693 VM_STAT_ADD(segvnvmstats.fill_vp_pages[30]); 3694 page_free_replacement_page(pplist); 3695 page_create_putback(pages); 3696 *ppplist = NULL; 3697 return (0); 3698 } 3699 3700 int segvn_anypgsz = 0; 3701 3702 #define SEGVN_RESTORE_SOFTLOCK_VP(type, pages) \ 3703 if ((type) == F_SOFTLOCK) { \ 3704 atomic_add_long((ulong_t *)&(svd)->softlockcnt, \ 3705 -(pages)); \ 3706 } 3707 3708 #define SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot) \ 3709 if (IS_VMODSORT((ppa)[0]->p_vnode)) { \ 3710 if ((rw) == S_WRITE) { \ 3711 for (i = 0; i < (pages); i++) { \ 3712 ASSERT((ppa)[i]->p_vnode == \ 3713 (ppa)[0]->p_vnode); \ 3714 hat_setmod((ppa)[i]); \ 3715 } \ 3716 } else if ((rw) != S_OTHER && \ 3717 ((prot) & (vpprot) & PROT_WRITE)) { \ 3718 for (i = 0; i < (pages); i++) { \ 3719 ASSERT((ppa)[i]->p_vnode == \ 3720 (ppa)[0]->p_vnode); \ 3721 if (!hat_ismod((ppa)[i])) { \ 3722 prot &= ~PROT_WRITE; \ 3723 break; \ 3724 } \ 3725 } \ 3726 } \ 3727 } 3728 3729 #ifdef VM_STATS 3730 3731 #define SEGVN_VMSTAT_FLTVNPAGES(idx) \ 3732 VM_STAT_ADD(segvnvmstats.fltvnpages[(idx)]); 3733 3734 #else /* VM_STATS */ 3735 3736 #define SEGVN_VMSTAT_FLTVNPAGES(idx) 3737 3738 #endif 3739 3740 static faultcode_t 3741 segvn_fault_vnodepages(struct hat *hat, struct seg *seg, caddr_t lpgaddr, 3742 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr, 3743 caddr_t eaddr, int brkcow) 3744 { 3745 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 3746 struct anon_map *amp = svd->amp; 3747 uchar_t segtype = svd->type; 3748 uint_t szc = seg->s_szc; 3749 size_t pgsz = page_get_pagesize(szc); 3750 size_t maxpgsz = pgsz; 3751 pgcnt_t pages = btop(pgsz); 3752 pgcnt_t maxpages = pages; 3753 size_t ppasize = (pages + 1) * sizeof (page_t *); 3754 caddr_t a = lpgaddr; 3755 caddr_t maxlpgeaddr = lpgeaddr; 3756 u_offset_t off = svd->offset + (uintptr_t)(a - seg->s_base); 3757 ulong_t aindx = svd->anon_index + seg_page(seg, a); 3758 struct vpage *vpage = (svd->vpage != NULL) ? 3759 &svd->vpage[seg_page(seg, a)] : NULL; 3760 vnode_t *vp = svd->vp; 3761 page_t **ppa; 3762 uint_t pszc; 3763 size_t ppgsz; 3764 pgcnt_t ppages; 3765 faultcode_t err = 0; 3766 int ierr; 3767 int vop_size_err = 0; 3768 uint_t protchk, prot, vpprot; 3769 ulong_t i; 3770 int hat_flag = (type == F_SOFTLOCK) ? 
HAT_LOAD_LOCK : HAT_LOAD; 3771 anon_sync_obj_t an_cookie; 3772 enum seg_rw arw; 3773 int alloc_failed = 0; 3774 int adjszc_chk; 3775 struct vattr va; 3776 int xhat = 0; 3777 page_t *pplist; 3778 pfn_t pfn; 3779 int physcontig; 3780 int upgrdfail; 3781 int segvn_anypgsz_vnode = 0; /* for now map vnode with 2 page sizes */ 3782 int tron = (svd->tr_state == SEGVN_TR_ON); 3783 3784 ASSERT(szc != 0); 3785 ASSERT(vp != NULL); 3786 ASSERT(brkcow == 0 || amp != NULL); 3787 ASSERT(tron == 0 || amp != NULL); 3788 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */ 3789 ASSERT(!(svd->flags & MAP_NORESERVE)); 3790 ASSERT(type != F_SOFTUNLOCK); 3791 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 3792 ASSERT(amp == NULL || IS_P2ALIGNED(aindx, maxpages)); 3793 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 3794 ASSERT(seg->s_szc < NBBY * sizeof (int)); 3795 ASSERT(type != F_SOFTLOCK || lpgeaddr - a == maxpgsz); 3796 ASSERT(svd->tr_state != SEGVN_TR_INIT); 3797 3798 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltvnpages[0]); 3799 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltvnpages[1]); 3800 3801 if (svd->flags & MAP_TEXT) { 3802 hat_flag |= HAT_LOAD_TEXT; 3803 } 3804 3805 if (svd->pageprot) { 3806 switch (rw) { 3807 case S_READ: 3808 protchk = PROT_READ; 3809 break; 3810 case S_WRITE: 3811 protchk = PROT_WRITE; 3812 break; 3813 case S_EXEC: 3814 protchk = PROT_EXEC; 3815 break; 3816 case S_OTHER: 3817 default: 3818 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 3819 break; 3820 } 3821 } else { 3822 prot = svd->prot; 3823 /* caller has already done segment level protection check. */ 3824 } 3825 3826 if (seg->s_as->a_hat != hat) { 3827 xhat = 1; 3828 } 3829 3830 if (rw == S_WRITE && segtype == MAP_PRIVATE) { 3831 SEGVN_VMSTAT_FLTVNPAGES(2); 3832 arw = S_READ; 3833 } else { 3834 arw = rw; 3835 } 3836 3837 ppa = kmem_alloc(ppasize, KM_SLEEP); 3838 3839 VM_STAT_COND_ADD(amp != NULL, segvnvmstats.fltvnpages[3]); 3840 3841 for (;;) { 3842 adjszc_chk = 0; 3843 for (; a < lpgeaddr; a += pgsz, off += pgsz, aindx += pages) { 3844 if (adjszc_chk) { 3845 while (szc < seg->s_szc) { 3846 uintptr_t e; 3847 uint_t tszc; 3848 tszc = segvn_anypgsz_vnode ? 
szc + 1 : 3849 seg->s_szc; 3850 ppgsz = page_get_pagesize(tszc); 3851 if (!IS_P2ALIGNED(a, ppgsz) || 3852 ((alloc_failed >> tszc) & 0x1)) { 3853 break; 3854 } 3855 SEGVN_VMSTAT_FLTVNPAGES(4); 3856 szc = tszc; 3857 pgsz = ppgsz; 3858 pages = btop(pgsz); 3859 e = P2ROUNDUP((uintptr_t)eaddr, pgsz); 3860 lpgeaddr = (caddr_t)e; 3861 } 3862 } 3863 3864 again: 3865 if (IS_P2ALIGNED(a, maxpgsz) && amp != NULL) { 3866 ASSERT(IS_P2ALIGNED(aindx, maxpages)); 3867 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 3868 anon_array_enter(amp, aindx, &an_cookie); 3869 if (anon_get_ptr(amp->ahp, aindx) != NULL) { 3870 SEGVN_VMSTAT_FLTVNPAGES(5); 3871 ASSERT(anon_pages(amp->ahp, aindx, 3872 maxpages) == maxpages); 3873 anon_array_exit(&an_cookie); 3874 ANON_LOCK_EXIT(&amp->a_rwlock); 3875 err = segvn_fault_anonpages(hat, seg, 3876 a, a + maxpgsz, type, rw, 3877 MAX(a, addr), 3878 MIN(a + maxpgsz, eaddr), brkcow); 3879 if (err != 0) { 3880 SEGVN_VMSTAT_FLTVNPAGES(6); 3881 goto out; 3882 } 3883 if (szc < seg->s_szc) { 3884 szc = seg->s_szc; 3885 pgsz = maxpgsz; 3886 pages = maxpages; 3887 lpgeaddr = maxlpgeaddr; 3888 } 3889 goto next; 3890 } else { 3891 ASSERT(anon_pages(amp->ahp, aindx, 3892 maxpages) == 0); 3893 SEGVN_VMSTAT_FLTVNPAGES(7); 3894 anon_array_exit(&an_cookie); 3895 ANON_LOCK_EXIT(&amp->a_rwlock); 3896 } 3897 } 3898 ASSERT(!brkcow || IS_P2ALIGNED(a, maxpgsz)); 3899 ASSERT(!tron || IS_P2ALIGNED(a, maxpgsz)); 3900 3901 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) { 3902 ASSERT(vpage != NULL); 3903 prot = VPP_PROT(vpage); 3904 ASSERT(sameprot(seg, a, maxpgsz)); 3905 if ((prot & protchk) == 0) { 3906 SEGVN_VMSTAT_FLTVNPAGES(8); 3907 err = FC_PROT; 3908 goto out; 3909 } 3910 } 3911 if (type == F_SOFTLOCK) { 3912 atomic_add_long((ulong_t *)&svd->softlockcnt, 3913 pages); 3914 } 3915 3916 pplist = NULL; 3917 physcontig = 0; 3918 ppa[0] = NULL; 3919 if (!brkcow && !tron && szc && 3920 !page_exists_physcontig(vp, off, szc, 3921 segtype == MAP_PRIVATE ?
ppa : NULL)) { 3922 SEGVN_VMSTAT_FLTVNPAGES(9); 3923 if (page_alloc_pages(vp, seg, a, &pplist, NULL, 3924 szc, 0, 0) && type != F_SOFTLOCK) { 3925 SEGVN_VMSTAT_FLTVNPAGES(10); 3926 pszc = 0; 3927 ierr = -1; 3928 alloc_failed |= (1 << szc); 3929 break; 3930 } 3931 if (pplist != NULL && 3932 vp->v_mpssdata == SEGVN_PAGEIO) { 3933 int downsize; 3934 SEGVN_VMSTAT_FLTVNPAGES(11); 3935 physcontig = segvn_fill_vp_pages(svd, 3936 vp, off, szc, ppa, &pplist, 3937 &pszc, &downsize); 3938 ASSERT(!physcontig || pplist == NULL); 3939 if (!physcontig && downsize && 3940 type != F_SOFTLOCK) { 3941 ASSERT(pplist == NULL); 3942 SEGVN_VMSTAT_FLTVNPAGES(12); 3943 ierr = -1; 3944 break; 3945 } 3946 ASSERT(!physcontig || 3947 segtype == MAP_PRIVATE || 3948 ppa[0] == NULL); 3949 if (physcontig && ppa[0] == NULL) { 3950 physcontig = 0; 3951 } 3952 } 3953 } else if (!brkcow && !tron && szc && ppa[0] != NULL) { 3954 SEGVN_VMSTAT_FLTVNPAGES(13); 3955 ASSERT(segtype == MAP_PRIVATE); 3956 physcontig = 1; 3957 } 3958 3959 if (!physcontig) { 3960 SEGVN_VMSTAT_FLTVNPAGES(14); 3961 ppa[0] = NULL; 3962 ierr = VOP_GETPAGE(vp, (offset_t)off, pgsz, 3963 &vpprot, ppa, pgsz, seg, a, arw, 3964 svd->cred, NULL); 3965 #ifdef DEBUG 3966 if (ierr == 0) { 3967 for (i = 0; i < pages; i++) { 3968 ASSERT(PAGE_LOCKED(ppa[i])); 3969 ASSERT(!PP_ISFREE(ppa[i])); 3970 ASSERT(ppa[i]->p_vnode == vp); 3971 ASSERT(ppa[i]->p_offset == 3972 off + (i << PAGESHIFT)); 3973 } 3974 } 3975 #endif /* DEBUG */ 3976 if (segtype == MAP_PRIVATE) { 3977 SEGVN_VMSTAT_FLTVNPAGES(15); 3978 vpprot &= ~PROT_WRITE; 3979 } 3980 } else { 3981 ASSERT(segtype == MAP_PRIVATE); 3982 SEGVN_VMSTAT_FLTVNPAGES(16); 3983 vpprot = PROT_ALL & ~PROT_WRITE; 3984 ierr = 0; 3985 } 3986 3987 if (ierr != 0) { 3988 SEGVN_VMSTAT_FLTVNPAGES(17); 3989 if (pplist != NULL) { 3990 SEGVN_VMSTAT_FLTVNPAGES(18); 3991 page_free_replacement_page(pplist); 3992 page_create_putback(pages); 3993 } 3994 SEGVN_RESTORE_SOFTLOCK_VP(type, pages); 3995 if (a + pgsz <= eaddr) { 3996 SEGVN_VMSTAT_FLTVNPAGES(19); 3997 err = FC_MAKE_ERR(ierr); 3998 goto out; 3999 } 4000 va.va_mask = AT_SIZE; 4001 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL)) { 4002 SEGVN_VMSTAT_FLTVNPAGES(20); 4003 err = FC_MAKE_ERR(EIO); 4004 goto out; 4005 } 4006 if (btopr(va.va_size) >= btopr(off + pgsz)) { 4007 SEGVN_VMSTAT_FLTVNPAGES(21); 4008 err = FC_MAKE_ERR(ierr); 4009 goto out; 4010 } 4011 if (btopr(va.va_size) < 4012 btopr(off + (eaddr - a))) { 4013 SEGVN_VMSTAT_FLTVNPAGES(22); 4014 err = FC_MAKE_ERR(ierr); 4015 goto out; 4016 } 4017 if (brkcow || tron || type == F_SOFTLOCK) { 4018 /* can't reduce map area */ 4019 SEGVN_VMSTAT_FLTVNPAGES(23); 4020 vop_size_err = 1; 4021 goto out; 4022 } 4023 SEGVN_VMSTAT_FLTVNPAGES(24); 4024 ASSERT(szc != 0); 4025 pszc = 0; 4026 ierr = -1; 4027 break; 4028 } 4029 4030 if (amp != NULL) { 4031 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 4032 anon_array_enter(amp, aindx, &an_cookie); 4033 } 4034 if (amp != NULL && 4035 anon_get_ptr(amp->ahp, aindx) != NULL) { 4036 ulong_t taindx = P2ALIGN(aindx, maxpages); 4037 4038 SEGVN_VMSTAT_FLTVNPAGES(25); 4039 ASSERT(anon_pages(amp->ahp, taindx, 4040 maxpages) == maxpages); 4041 for (i = 0; i < pages; i++) { 4042 page_unlock(ppa[i]); 4043 } 4044 anon_array_exit(&an_cookie); 4045 ANON_LOCK_EXIT(&->a_rwlock); 4046 if (pplist != NULL) { 4047 page_free_replacement_page(pplist); 4048 page_create_putback(pages); 4049 } 4050 SEGVN_RESTORE_SOFTLOCK_VP(type, pages); 4051 if (szc < seg->s_szc) { 4052 SEGVN_VMSTAT_FLTVNPAGES(26); 4053 /* 4054 * For private segments SOFTLOCK 
4055 * either always breaks cow (any rw
4056 * type except S_READ_NOCOW) or
4057 * address space is locked as writer
4058 * (S_READ_NOCOW case) and anon slots
4059 * can't show up on second check.
4060 * Therefore if we are here for
4061 * SOFTLOCK case it must be a cow
4062 * break but cow break never reduces
4063 * szc. Text replication (tron) in
4064 * this case works as cow break.
4065 * Thus the assert below.
4066 */
4067 ASSERT(!brkcow && !tron &&
4068 type != F_SOFTLOCK);
4069 pszc = seg->s_szc;
4070 ierr = -2;
4071 break;
4072 }
4073 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4074 goto again;
4075 }
4076 #ifdef DEBUG
4077 if (amp != NULL) {
4078 ulong_t taindx = P2ALIGN(aindx, maxpages);
4079 ASSERT(!anon_pages(amp->ahp, taindx, maxpages));
4080 }
4081 #endif /* DEBUG */
4082
4083 if (brkcow || tron) {
4084 ASSERT(amp != NULL);
4085 ASSERT(pplist == NULL);
4086 ASSERT(szc == seg->s_szc);
4087 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4088 ASSERT(IS_P2ALIGNED(aindx, maxpages));
4089 SEGVN_VMSTAT_FLTVNPAGES(27);
4090 ierr = anon_map_privatepages(amp, aindx, szc,
4091 seg, a, prot, ppa, vpage, segvn_anypgsz,
4092 tron ? PG_LOCAL : 0, svd->cred);
4093 if (ierr != 0) {
4094 SEGVN_VMSTAT_FLTVNPAGES(28);
4095 anon_array_exit(&an_cookie);
4096 ANON_LOCK_EXIT(&amp->a_rwlock);
4097 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4098 err = FC_MAKE_ERR(ierr);
4099 goto out;
4100 }
4101
4102 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode));
4103 /*
4104 * p_szc can't be changed for locked
4105 * swapfs pages.
4106 */
4107 ASSERT(svd->rcookie ==
4108 HAT_INVALID_REGION_COOKIE);
4109 hat_memload_array(hat, a, pgsz, ppa, prot,
4110 hat_flag);
4111
4112 if (!(hat_flag & HAT_LOAD_LOCK)) {
4113 SEGVN_VMSTAT_FLTVNPAGES(29);
4114 for (i = 0; i < pages; i++) {
4115 page_unlock(ppa[i]);
4116 }
4117 }
4118 anon_array_exit(&an_cookie);
4119 ANON_LOCK_EXIT(&amp->a_rwlock);
4120 goto next;
4121 }
4122
4123 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE ||
4124 (!svd->pageprot && svd->prot == (prot & vpprot)));
4125
4126 pfn = page_pptonum(ppa[0]);
4127 /*
4128 * hat_page_demote() needs an SE_EXCL lock on one of
4129 * constituent page_t's and it decreases root's p_szc
4130 * last. This means if root's p_szc is equal to szc and
4131 * all its constituent pages are locked
4132 * hat_page_demote() that could have changed p_szc to
4133 * szc is already done and no new hat_page_demote()
4134 * can start for this large page.
4135 */
4136
4137 /*
4138 * we need to make sure the same mapping size is used for
4139 * the same address range if there's a possibility the
4140 * address is already mapped because hat layer panics
4141 * when translation is loaded for the range already
4142 * mapped with a different page size. We achieve it
4143 * by always using largest page size possible subject
4144 * to the constraints of page size, segment page size
4145 * and page alignment. Since mappings are invalidated
4146 * when those constraints change and make it
4147 * impossible to use previously used mapping size, no
4148 * mapping size conflicts should happen.
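 * For example, when the chkszc checks below find pages whose p_szc
 * doesn't match the requested szc, we either load the range with
 * smaller matching mappings or resize the attempt (ierr == -1/-2)
 * instead of loading a conflicting large translation.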
4149 */ 4150 4151 chkszc: 4152 if ((pszc = ppa[0]->p_szc) == szc && 4153 IS_P2ALIGNED(pfn, pages)) { 4154 4155 SEGVN_VMSTAT_FLTVNPAGES(30); 4156 #ifdef DEBUG 4157 for (i = 0; i < pages; i++) { 4158 ASSERT(PAGE_LOCKED(ppa[i])); 4159 ASSERT(!PP_ISFREE(ppa[i])); 4160 ASSERT(page_pptonum(ppa[i]) == 4161 pfn + i); 4162 ASSERT(ppa[i]->p_szc == szc); 4163 ASSERT(ppa[i]->p_vnode == vp); 4164 ASSERT(ppa[i]->p_offset == 4165 off + (i << PAGESHIFT)); 4166 } 4167 #endif /* DEBUG */ 4168 /* 4169 * All pages are of szc we need and they are 4170 * all locked so they can't change szc. load 4171 * translations. 4172 * 4173 * if page got promoted since last check 4174 * we don't need pplist. 4175 */ 4176 if (pplist != NULL) { 4177 page_free_replacement_page(pplist); 4178 page_create_putback(pages); 4179 } 4180 if (PP_ISMIGRATE(ppa[0])) { 4181 page_migrate(seg, a, ppa, pages); 4182 } 4183 SEGVN_UPDATE_MODBITS(ppa, pages, rw, 4184 prot, vpprot); 4185 if (!xhat) { 4186 hat_memload_array_region(hat, a, pgsz, 4187 ppa, prot & vpprot, hat_flag, 4188 svd->rcookie); 4189 } else { 4190 /* 4191 * avoid large xhat mappings to FS 4192 * pages so that hat_page_demote() 4193 * doesn't need to check for xhat 4194 * large mappings. 4195 * Don't use regions with xhats. 4196 */ 4197 for (i = 0; i < pages; i++) { 4198 hat_memload(hat, 4199 a + (i << PAGESHIFT), 4200 ppa[i], prot & vpprot, 4201 hat_flag); 4202 } 4203 } 4204 4205 if (!(hat_flag & HAT_LOAD_LOCK)) { 4206 for (i = 0; i < pages; i++) { 4207 page_unlock(ppa[i]); 4208 } 4209 } 4210 if (amp != NULL) { 4211 anon_array_exit(&an_cookie); 4212 ANON_LOCK_EXIT(&->a_rwlock); 4213 } 4214 goto next; 4215 } 4216 4217 /* 4218 * See if upsize is possible. 4219 */ 4220 if (pszc > szc && szc < seg->s_szc && 4221 (segvn_anypgsz_vnode || pszc >= seg->s_szc)) { 4222 pgcnt_t aphase; 4223 uint_t pszc1 = MIN(pszc, seg->s_szc); 4224 ppgsz = page_get_pagesize(pszc1); 4225 ppages = btop(ppgsz); 4226 aphase = btop(P2PHASE((uintptr_t)a, ppgsz)); 4227 4228 ASSERT(type != F_SOFTLOCK); 4229 4230 SEGVN_VMSTAT_FLTVNPAGES(31); 4231 if (aphase != P2PHASE(pfn, ppages)) { 4232 segvn_faultvnmpss_align_err4++; 4233 } else { 4234 SEGVN_VMSTAT_FLTVNPAGES(32); 4235 if (pplist != NULL) { 4236 page_t *pl = pplist; 4237 page_free_replacement_page(pl); 4238 page_create_putback(pages); 4239 } 4240 for (i = 0; i < pages; i++) { 4241 page_unlock(ppa[i]); 4242 } 4243 if (amp != NULL) { 4244 anon_array_exit(&an_cookie); 4245 ANON_LOCK_EXIT(&->a_rwlock); 4246 } 4247 pszc = pszc1; 4248 ierr = -2; 4249 break; 4250 } 4251 } 4252 4253 /* 4254 * check if we should use smallest mapping size. 4255 */ 4256 upgrdfail = 0; 4257 if (szc == 0 || xhat || 4258 (pszc >= szc && 4259 !IS_P2ALIGNED(pfn, pages)) || 4260 (pszc < szc && 4261 !segvn_full_szcpages(ppa, szc, &upgrdfail, 4262 &pszc))) { 4263 4264 if (upgrdfail && type != F_SOFTLOCK) { 4265 /* 4266 * segvn_full_szcpages failed to lock 4267 * all pages EXCL. Size down. 
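 * (F_SOFTLOCK can't shrink the locked range, so in that case we
 * fall through below and load the constituent pages with smaller
 * mappings instead of breaking out here.)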
4268 */
4269 ASSERT(pszc < szc);
4270
4271 SEGVN_VMSTAT_FLTVNPAGES(33);
4272
4273 if (pplist != NULL) {
4274 page_t *pl = pplist;
4275 page_free_replacement_page(pl);
4276 page_create_putback(pages);
4277 }
4278
4279 for (i = 0; i < pages; i++) {
4280 page_unlock(ppa[i]);
4281 }
4282 if (amp != NULL) {
4283 anon_array_exit(&an_cookie);
4284 ANON_LOCK_EXIT(&amp->a_rwlock);
4285 }
4286 ierr = -1;
4287 break;
4288 }
4289 if (szc != 0 && !xhat && !upgrdfail) {
4290 segvn_faultvnmpss_align_err5++;
4291 }
4292 SEGVN_VMSTAT_FLTVNPAGES(34);
4293 if (pplist != NULL) {
4294 page_free_replacement_page(pplist);
4295 page_create_putback(pages);
4296 }
4297 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4298 prot, vpprot);
4299 if (upgrdfail && segvn_anypgsz_vnode) {
4300 /* SOFTLOCK case */
4301 hat_memload_array_region(hat, a, pgsz,
4302 ppa, prot & vpprot, hat_flag,
4303 svd->rcookie);
4304 } else {
4305 for (i = 0; i < pages; i++) {
4306 hat_memload_region(hat,
4307 a + (i << PAGESHIFT),
4308 ppa[i], prot & vpprot,
4309 hat_flag, svd->rcookie);
4310 }
4311 }
4312 if (!(hat_flag & HAT_LOAD_LOCK)) {
4313 for (i = 0; i < pages; i++) {
4314 page_unlock(ppa[i]);
4315 }
4316 }
4317 if (amp != NULL) {
4318 anon_array_exit(&an_cookie);
4319 ANON_LOCK_EXIT(&amp->a_rwlock);
4320 }
4321 goto next;
4322 }
4323
4324 if (pszc == szc) {
4325 /*
4326 * segvn_full_szcpages() upgraded the pages' szc.
4327 */
4328 ASSERT(pszc == ppa[0]->p_szc);
4329 ASSERT(IS_P2ALIGNED(pfn, pages));
4330 goto chkszc;
4331 }
4332
4333 if (pszc > szc) {
4334 kmutex_t *szcmtx;
4335 SEGVN_VMSTAT_FLTVNPAGES(35);
4336 /*
4337 * p_szc of ppa[0] can change since we haven't
4338 * locked all constituent pages. Call
4339 * page_lock_szc() to prevent szc changes.
4340 * This should be a rare case that happens when
4341 * multiple segments use a different page size
4342 * to map the same file offsets.
4343 */
4344 szcmtx = page_szc_lock(ppa[0]);
4345 pszc = ppa[0]->p_szc;
4346 ASSERT(szcmtx != NULL || pszc == 0);
4347 ASSERT(ppa[0]->p_szc <= pszc);
4348 if (pszc <= szc) {
4349 SEGVN_VMSTAT_FLTVNPAGES(36);
4350 if (szcmtx != NULL) {
4351 mutex_exit(szcmtx);
4352 }
4353 goto chkszc;
4354 }
4355 if (pplist != NULL) {
4356 /*
4357 * Page got promoted since the last check;
4358 * we don't need the preallocated large
4359 * page.
4360 */
4361 SEGVN_VMSTAT_FLTVNPAGES(37);
4362 page_free_replacement_page(pplist);
4363 page_create_putback(pages);
4364 }
4365 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4366 prot, vpprot);
4367 hat_memload_array_region(hat, a, pgsz, ppa,
4368 prot & vpprot, hat_flag, svd->rcookie);
4369 mutex_exit(szcmtx);
4370 if (!(hat_flag & HAT_LOAD_LOCK)) {
4371 for (i = 0; i < pages; i++) {
4372 page_unlock(ppa[i]);
4373 }
4374 }
4375 if (amp != NULL) {
4376 anon_array_exit(&an_cookie);
4377 ANON_LOCK_EXIT(&amp->a_rwlock);
4378 }
4379 goto next;
4380 }
4381
4382 /*
4383 * If the page got demoted since the last check
4384 * we might not have allocated a larger page.
4385 * Allocate it now.
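 * (If that allocation fails and this isn't a softlock fault, the
 * failure is recorded in alloc_failed and we break out to retry
 * the range with a smaller page size.)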
4386 */
4387 if (pplist == NULL &&
4388 page_alloc_pages(vp, seg, a, &pplist, NULL,
4389 szc, 0, 0) && type != F_SOFTLOCK) {
4390 SEGVN_VMSTAT_FLTVNPAGES(38);
4391 for (i = 0; i < pages; i++) {
4392 page_unlock(ppa[i]);
4393 }
4394 if (amp != NULL) {
4395 anon_array_exit(&an_cookie);
4396 ANON_LOCK_EXIT(&amp->a_rwlock);
4397 }
4398 ierr = -1;
4399 alloc_failed |= (1 << szc);
4400 break;
4401 }
4402
4403 SEGVN_VMSTAT_FLTVNPAGES(39);
4404
4405 if (pplist != NULL) {
4406 segvn_relocate_pages(ppa, pplist);
4407 #ifdef DEBUG
4408 } else {
4409 ASSERT(type == F_SOFTLOCK);
4410 SEGVN_VMSTAT_FLTVNPAGES(40);
4411 #endif /* DEBUG */
4412 }
4413
4414 SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot);
4415
4416 if (pplist == NULL && segvn_anypgsz_vnode == 0) {
4417 ASSERT(type == F_SOFTLOCK);
4418 for (i = 0; i < pages; i++) {
4419 ASSERT(ppa[i]->p_szc < szc);
4420 hat_memload_region(hat,
4421 a + (i << PAGESHIFT),
4422 ppa[i], prot & vpprot, hat_flag,
4423 svd->rcookie);
4424 }
4425 } else {
4426 ASSERT(pplist != NULL || type == F_SOFTLOCK);
4427 hat_memload_array_region(hat, a, pgsz, ppa,
4428 prot & vpprot, hat_flag, svd->rcookie);
4429 }
4430 if (!(hat_flag & HAT_LOAD_LOCK)) {
4431 for (i = 0; i < pages; i++) {
4432 ASSERT(PAGE_SHARED(ppa[i]));
4433 page_unlock(ppa[i]);
4434 }
4435 }
4436 if (amp != NULL) {
4437 anon_array_exit(&an_cookie);
4438 ANON_LOCK_EXIT(&amp->a_rwlock);
4439 }
4440
4441 next:
4442 if (vpage != NULL) {
4443 vpage += pages;
4444 }
4445 adjszc_chk = 1;
4446 }
4447 if (a == lpgeaddr)
4448 break;
4449 ASSERT(a < lpgeaddr);
4450
4451 ASSERT(!brkcow && !tron && type != F_SOFTLOCK);
4452
4453 /*
4454 * ierr == -1 means we failed to map with a large page
4455 * (either due to allocation/relocation failures or
4456 * misalignment with other mappings to this file).
4457 *
4458 * ierr == -2 means some other thread allocated a large page
4459 * after we gave up to map with a large page. Retry with
4460 * a larger mapping.
4461 */
4462 ASSERT(ierr == -1 || ierr == -2);
4463 ASSERT(ierr == -2 || szc != 0);
4464 ASSERT(ierr == -1 || szc < seg->s_szc);
4465 if (ierr == -2) {
4466 SEGVN_VMSTAT_FLTVNPAGES(41);
4467 ASSERT(pszc > szc && pszc <= seg->s_szc);
4468 szc = pszc;
4469 } else if (segvn_anypgsz_vnode) {
4470 SEGVN_VMSTAT_FLTVNPAGES(42);
4471 szc--;
4472 } else {
4473 SEGVN_VMSTAT_FLTVNPAGES(43);
4474 ASSERT(pszc < szc);
4475 /*
4476 * Another process created a pszc large page,
4477 * but we still have to drop to szc 0.
4478 */
4479 szc = 0;
4480 }
4481
4482 pgsz = page_get_pagesize(szc);
4483 pages = btop(pgsz);
4484 if (ierr == -2) {
4485 /*
4486 * Size up case. Note lpgaddr may only be needed for
4487 * softlock case so we don't adjust it here.
4488 */
4489 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz);
4490 ASSERT(a >= lpgaddr);
4491 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4492 off = svd->offset + (uintptr_t)(a - seg->s_base);
4493 aindx = svd->anon_index + seg_page(seg, a);
4494 vpage = (svd->vpage != NULL) ?
4495 &svd->vpage[seg_page(seg, a)] : NULL;
4496 } else {
4497 /*
4498 * Size down case. Note lpgaddr may only be needed for
4499 * softlock case so we don't adjust it here.
4500 */
4501 ASSERT(IS_P2ALIGNED(a, pgsz));
4502 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz));
4503 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4504 ASSERT(a < lpgeaddr);
4505 if (a < addr) {
4506 SEGVN_VMSTAT_FLTVNPAGES(44);
4507 /*
4508 * The beginning of the large page region can
4509 * be pulled to the right to make a smaller
4510 * region. We haven't yet faulted a single
4511 * page.
4512 */ 4513 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 4514 ASSERT(a >= lpgaddr); 4515 off = svd->offset + 4516 (uintptr_t)(a - seg->s_base); 4517 aindx = svd->anon_index + seg_page(seg, a); 4518 vpage = (svd->vpage != NULL) ? 4519 &svd->vpage[seg_page(seg, a)] : NULL; 4520 } 4521 } 4522 } 4523 out: 4524 kmem_free(ppa, ppasize); 4525 if (!err && !vop_size_err) { 4526 SEGVN_VMSTAT_FLTVNPAGES(45); 4527 return (0); 4528 } 4529 if (type == F_SOFTLOCK && a > lpgaddr) { 4530 SEGVN_VMSTAT_FLTVNPAGES(46); 4531 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER); 4532 } 4533 if (!vop_size_err) { 4534 SEGVN_VMSTAT_FLTVNPAGES(47); 4535 return (err); 4536 } 4537 ASSERT(brkcow || tron || type == F_SOFTLOCK); 4538 /* 4539 * Large page end is mapped beyond the end of file and it's a cow 4540 * fault (can be a text replication induced cow) or softlock so we can't 4541 * reduce the map area. For now just demote the segment. This should 4542 * really only happen if the end of the file changed after the mapping 4543 * was established since when large page segments are created we make 4544 * sure they don't extend beyond the end of the file. 4545 */ 4546 SEGVN_VMSTAT_FLTVNPAGES(48); 4547 4548 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4549 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4550 err = 0; 4551 if (seg->s_szc != 0) { 4552 segvn_fltvnpages_clrszc_cnt++; 4553 ASSERT(svd->softlockcnt == 0); 4554 err = segvn_clrszc(seg); 4555 if (err != 0) { 4556 segvn_fltvnpages_clrszc_err++; 4557 } 4558 } 4559 ASSERT(err || seg->s_szc == 0); 4560 SEGVN_LOCK_DOWNGRADE(seg->s_as, &svd->lock); 4561 /* segvn_fault will do its job as if szc had been zero to begin with */ 4562 return (err == 0 ? IE_RETRY : FC_MAKE_ERR(err)); 4563 } 4564 4565 /* 4566 * This routine will attempt to fault in one large page. 4567 * it will use smaller pages if that fails. 4568 * It should only be called for pure anonymous segments. 4569 */ 4570 static faultcode_t 4571 segvn_fault_anonpages(struct hat *hat, struct seg *seg, caddr_t lpgaddr, 4572 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr, 4573 caddr_t eaddr, int brkcow) 4574 { 4575 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 4576 struct anon_map *amp = svd->amp; 4577 uchar_t segtype = svd->type; 4578 uint_t szc = seg->s_szc; 4579 size_t pgsz = page_get_pagesize(szc); 4580 size_t maxpgsz = pgsz; 4581 pgcnt_t pages = btop(pgsz); 4582 uint_t ppaszc = szc; 4583 caddr_t a = lpgaddr; 4584 ulong_t aindx = svd->anon_index + seg_page(seg, a); 4585 struct vpage *vpage = (svd->vpage != NULL) ? 4586 &svd->vpage[seg_page(seg, a)] : NULL; 4587 page_t **ppa; 4588 uint_t ppa_szc; 4589 faultcode_t err; 4590 int ierr; 4591 uint_t protchk, prot, vpprot; 4592 ulong_t i; 4593 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD; 4594 anon_sync_obj_t cookie; 4595 int adjszc_chk; 4596 int pgflags = (svd->tr_state == SEGVN_TR_ON) ? 
PG_LOCAL : 0; 4597 4598 ASSERT(szc != 0); 4599 ASSERT(amp != NULL); 4600 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */ 4601 ASSERT(!(svd->flags & MAP_NORESERVE)); 4602 ASSERT(type != F_SOFTUNLOCK); 4603 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 4604 ASSERT(!brkcow || svd->tr_state == SEGVN_TR_OFF); 4605 ASSERT(svd->tr_state != SEGVN_TR_INIT); 4606 4607 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 4608 4609 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltanpages[0]); 4610 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltanpages[1]); 4611 4612 if (svd->flags & MAP_TEXT) { 4613 hat_flag |= HAT_LOAD_TEXT; 4614 } 4615 4616 if (svd->pageprot) { 4617 switch (rw) { 4618 case S_READ: 4619 protchk = PROT_READ; 4620 break; 4621 case S_WRITE: 4622 protchk = PROT_WRITE; 4623 break; 4624 case S_EXEC: 4625 protchk = PROT_EXEC; 4626 break; 4627 case S_OTHER: 4628 default: 4629 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 4630 break; 4631 } 4632 VM_STAT_ADD(segvnvmstats.fltanpages[2]); 4633 } else { 4634 prot = svd->prot; 4635 /* caller has already done segment level protection check. */ 4636 } 4637 4638 ppa = kmem_cache_alloc(segvn_szc_cache[ppaszc], KM_SLEEP); 4639 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 4640 for (;;) { 4641 adjszc_chk = 0; 4642 for (; a < lpgeaddr; a += pgsz, aindx += pages) { 4643 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) { 4644 VM_STAT_ADD(segvnvmstats.fltanpages[3]); 4645 ASSERT(vpage != NULL); 4646 prot = VPP_PROT(vpage); 4647 ASSERT(sameprot(seg, a, maxpgsz)); 4648 if ((prot & protchk) == 0) { 4649 err = FC_PROT; 4650 goto error; 4651 } 4652 } 4653 if (adjszc_chk && IS_P2ALIGNED(a, maxpgsz) && 4654 pgsz < maxpgsz) { 4655 ASSERT(a > lpgaddr); 4656 szc = seg->s_szc; 4657 pgsz = maxpgsz; 4658 pages = btop(pgsz); 4659 ASSERT(IS_P2ALIGNED(aindx, pages)); 4660 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, 4661 pgsz); 4662 } 4663 if (type == F_SOFTLOCK) { 4664 atomic_add_long((ulong_t *)&svd->softlockcnt, 4665 pages); 4666 } 4667 anon_array_enter(amp, aindx, &cookie); 4668 ppa_szc = (uint_t)-1; 4669 ierr = anon_map_getpages(amp, aindx, szc, seg, a, 4670 prot, &vpprot, ppa, &ppa_szc, vpage, rw, brkcow, 4671 segvn_anypgsz, pgflags, svd->cred); 4672 if (ierr != 0) { 4673 anon_array_exit(&cookie); 4674 VM_STAT_ADD(segvnvmstats.fltanpages[4]); 4675 if (type == F_SOFTLOCK) { 4676 atomic_add_long( 4677 (ulong_t *)&svd->softlockcnt, 4678 -pages); 4679 } 4680 if (ierr > 0) { 4681 VM_STAT_ADD(segvnvmstats.fltanpages[6]); 4682 err = FC_MAKE_ERR(ierr); 4683 goto error; 4684 } 4685 break; 4686 } 4687 4688 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode)); 4689 4690 ASSERT(segtype == MAP_SHARED || 4691 ppa[0]->p_szc <= szc); 4692 ASSERT(segtype == MAP_PRIVATE || 4693 ppa[0]->p_szc >= szc); 4694 4695 /* 4696 * Handle pages that have been marked for migration 4697 */ 4698 if (lgrp_optimizations()) 4699 page_migrate(seg, a, ppa, pages); 4700 4701 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 4702 4703 if (segtype == MAP_SHARED) { 4704 vpprot |= PROT_WRITE; 4705 } 4706 4707 hat_memload_array(hat, a, pgsz, ppa, 4708 prot & vpprot, hat_flag); 4709 4710 if (hat_flag & HAT_LOAD_LOCK) { 4711 VM_STAT_ADD(segvnvmstats.fltanpages[7]); 4712 } else { 4713 VM_STAT_ADD(segvnvmstats.fltanpages[8]); 4714 for (i = 0; i < pages; i++) 4715 page_unlock(ppa[i]); 4716 } 4717 if (vpage != NULL) 4718 vpage += pages; 4719 4720 anon_array_exit(&cookie); 4721 adjszc_chk = 1; 4722 } 4723 if (a == lpgeaddr) 4724 break; 4725 ASSERT(a < lpgeaddr); 4726 /* 4727 * ierr == -1 means we failed to 
allocate a large page. 4728 * so do a size down operation. 4729 * 4730 * ierr == -2 means some other process that privately shares 4731 * pages with this process has allocated a larger page and we 4732 * need to retry with larger pages. So do a size up 4733 * operation. This relies on the fact that large pages are 4734 * never partially shared i.e. if we share any constituent 4735 * page of a large page with another process we must share the 4736 * entire large page. Note this cannot happen for SOFTLOCK 4737 * case, unless current address (a) is at the beginning of the 4738 * next page size boundary because the other process couldn't 4739 * have relocated locked pages. 4740 */ 4741 ASSERT(ierr == -1 || ierr == -2); 4742 4743 if (segvn_anypgsz) { 4744 ASSERT(ierr == -2 || szc != 0); 4745 ASSERT(ierr == -1 || szc < seg->s_szc); 4746 szc = (ierr == -1) ? szc - 1 : szc + 1; 4747 } else { 4748 /* 4749 * For non COW faults and segvn_anypgsz == 0 4750 * we need to be careful not to loop forever 4751 * if existing page is found with szc other 4752 * than 0 or seg->s_szc. This could be due 4753 * to page relocations on behalf of DR or 4754 * more likely large page creation. For this 4755 * case simply re-size to existing page's szc 4756 * if returned by anon_map_getpages(). 4757 */ 4758 if (ppa_szc == (uint_t)-1) { 4759 szc = (ierr == -1) ? 0 : seg->s_szc; 4760 } else { 4761 ASSERT(ppa_szc <= seg->s_szc); 4762 ASSERT(ierr == -2 || ppa_szc < szc); 4763 ASSERT(ierr == -1 || ppa_szc > szc); 4764 szc = ppa_szc; 4765 } 4766 } 4767 4768 pgsz = page_get_pagesize(szc); 4769 pages = btop(pgsz); 4770 ASSERT(type != F_SOFTLOCK || ierr == -1 || 4771 (IS_P2ALIGNED(a, pgsz) && IS_P2ALIGNED(lpgeaddr, pgsz))); 4772 if (type == F_SOFTLOCK) { 4773 /* 4774 * For softlocks we cannot reduce the fault area 4775 * (calculated based on the largest page size for this 4776 * segment) for size down and a is already next 4777 * page size aligned as assertted above for size 4778 * ups. Therefore just continue in case of softlock. 4779 */ 4780 VM_STAT_ADD(segvnvmstats.fltanpages[9]); 4781 continue; /* keep lint happy */ 4782 } else if (ierr == -2) { 4783 4784 /* 4785 * Size up case. Note lpgaddr may only be needed for 4786 * softlock case so we don't adjust it here. 4787 */ 4788 VM_STAT_ADD(segvnvmstats.fltanpages[10]); 4789 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz); 4790 ASSERT(a >= lpgaddr); 4791 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4792 aindx = svd->anon_index + seg_page(seg, a); 4793 vpage = (svd->vpage != NULL) ? 4794 &svd->vpage[seg_page(seg, a)] : NULL; 4795 } else { 4796 /* 4797 * Size down case. Note lpgaddr may only be needed for 4798 * softlock case so we don't adjust it here. 4799 */ 4800 VM_STAT_ADD(segvnvmstats.fltanpages[11]); 4801 ASSERT(IS_P2ALIGNED(a, pgsz)); 4802 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz)); 4803 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4804 ASSERT(a < lpgeaddr); 4805 if (a < addr) { 4806 /* 4807 * The beginning of the large page region can 4808 * be pulled to the right to make a smaller 4809 * region. We haven't yet faulted a single 4810 * page. 4811 */ 4812 VM_STAT_ADD(segvnvmstats.fltanpages[12]); 4813 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 4814 ASSERT(a >= lpgaddr); 4815 aindx = svd->anon_index + seg_page(seg, a); 4816 vpage = (svd->vpage != NULL) ? 
4817 &svd->vpage[seg_page(seg, a)] : NULL; 4818 } 4819 } 4820 } 4821 VM_STAT_ADD(segvnvmstats.fltanpages[13]); 4822 ANON_LOCK_EXIT(&->a_rwlock); 4823 kmem_cache_free(segvn_szc_cache[ppaszc], ppa); 4824 return (0); 4825 error: 4826 VM_STAT_ADD(segvnvmstats.fltanpages[14]); 4827 ANON_LOCK_EXIT(&->a_rwlock); 4828 kmem_cache_free(segvn_szc_cache[ppaszc], ppa); 4829 if (type == F_SOFTLOCK && a > lpgaddr) { 4830 VM_STAT_ADD(segvnvmstats.fltanpages[15]); 4831 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER); 4832 } 4833 return (err); 4834 } 4835 4836 int fltadvice = 1; /* set to free behind pages for sequential access */ 4837 4838 /* 4839 * This routine is called via a machine specific fault handling routine. 4840 * It is also called by software routines wishing to lock or unlock 4841 * a range of addresses. 4842 * 4843 * Here is the basic algorithm: 4844 * If unlocking 4845 * Call segvn_softunlock 4846 * Return 4847 * endif 4848 * Checking and set up work 4849 * If we will need some non-anonymous pages 4850 * Call VOP_GETPAGE over the range of non-anonymous pages 4851 * endif 4852 * Loop over all addresses requested 4853 * Call segvn_faultpage passing in page list 4854 * to load up translations and handle anonymous pages 4855 * endloop 4856 * Load up translation to any additional pages in page list not 4857 * already handled that fit into this segment 4858 */ 4859 static faultcode_t 4860 segvn_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len, 4861 enum fault_type type, enum seg_rw rw) 4862 { 4863 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 4864 page_t **plp, **ppp, *pp; 4865 u_offset_t off; 4866 caddr_t a; 4867 struct vpage *vpage; 4868 uint_t vpprot, prot; 4869 int err; 4870 page_t *pl[PVN_GETPAGE_NUM + 1]; 4871 size_t plsz, pl_alloc_sz; 4872 size_t page; 4873 ulong_t anon_index; 4874 struct anon_map *amp; 4875 int dogetpage = 0; 4876 caddr_t lpgaddr, lpgeaddr; 4877 size_t pgsz; 4878 anon_sync_obj_t cookie; 4879 int brkcow = BREAK_COW_SHARE(rw, type, svd->type); 4880 4881 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 4882 ASSERT(svd->amp == NULL || svd->rcookie == HAT_INVALID_REGION_COOKIE); 4883 4884 /* 4885 * First handle the easy stuff 4886 */ 4887 if (type == F_SOFTUNLOCK) { 4888 if (rw == S_READ_NOCOW) { 4889 rw = S_READ; 4890 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 4891 } 4892 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 4893 pgsz = (seg->s_szc == 0) ? 
PAGESIZE : 4894 page_get_pagesize(seg->s_szc); 4895 VM_STAT_COND_ADD(pgsz > PAGESIZE, segvnvmstats.fltanpages[16]); 4896 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 4897 segvn_softunlock(seg, lpgaddr, lpgeaddr - lpgaddr, rw); 4898 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4899 return (0); 4900 } 4901 4902 ASSERT(svd->tr_state == SEGVN_TR_OFF || 4903 !HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 4904 if (brkcow == 0) { 4905 if (svd->tr_state == SEGVN_TR_INIT) { 4906 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4907 if (svd->tr_state == SEGVN_TR_INIT) { 4908 ASSERT(svd->vp != NULL && svd->amp == NULL); 4909 ASSERT(svd->flags & MAP_TEXT); 4910 ASSERT(svd->type == MAP_PRIVATE); 4911 segvn_textrepl(seg); 4912 ASSERT(svd->tr_state != SEGVN_TR_INIT); 4913 ASSERT(svd->tr_state != SEGVN_TR_ON || 4914 svd->amp != NULL); 4915 } 4916 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4917 } 4918 } else if (svd->tr_state != SEGVN_TR_OFF) { 4919 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4920 4921 if (rw == S_WRITE && svd->tr_state != SEGVN_TR_OFF) { 4922 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE)); 4923 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4924 return (FC_PROT); 4925 } 4926 4927 if (svd->tr_state == SEGVN_TR_ON) { 4928 ASSERT(svd->vp != NULL && svd->amp != NULL); 4929 segvn_textunrepl(seg, 0); 4930 ASSERT(svd->amp == NULL && 4931 svd->tr_state == SEGVN_TR_OFF); 4932 } else if (svd->tr_state != SEGVN_TR_OFF) { 4933 svd->tr_state = SEGVN_TR_OFF; 4934 } 4935 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 4936 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4937 } 4938 4939 top: 4940 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 4941 4942 /* 4943 * If we have the same protections for the entire segment, 4944 * insure that the access being attempted is legitimate. 4945 */ 4946 4947 if (svd->pageprot == 0) { 4948 uint_t protchk; 4949 4950 switch (rw) { 4951 case S_READ: 4952 case S_READ_NOCOW: 4953 protchk = PROT_READ; 4954 break; 4955 case S_WRITE: 4956 protchk = PROT_WRITE; 4957 break; 4958 case S_EXEC: 4959 protchk = PROT_EXEC; 4960 break; 4961 case S_OTHER: 4962 default: 4963 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 4964 break; 4965 } 4966 4967 if ((svd->prot & protchk) == 0) { 4968 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4969 return (FC_PROT); /* illegal access type */ 4970 } 4971 } 4972 4973 if (brkcow && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 4974 /* this must be SOFTLOCK S_READ fault */ 4975 ASSERT(svd->amp == NULL); 4976 ASSERT(svd->tr_state == SEGVN_TR_OFF); 4977 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4978 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4979 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 4980 /* 4981 * this must be the first ever non S_READ_NOCOW 4982 * softlock for this segment. 4983 */ 4984 ASSERT(svd->softlockcnt == 0); 4985 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 4986 HAT_REGION_TEXT); 4987 svd->rcookie = HAT_INVALID_REGION_COOKIE; 4988 } 4989 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4990 goto top; 4991 } 4992 4993 /* 4994 * We can't allow the long term use of softlocks for vmpss segments, 4995 * because in some file truncation cases we should be able to demote 4996 * the segment, which requires that there are no softlocks. The 4997 * only case where it's ok to allow a SOFTLOCK fault against a vmpss 4998 * segment is S_READ_NOCOW, where the caller holds the address space 4999 * locked as writer and calls softunlock before dropping the as lock. 5000 * S_READ_NOCOW is used by /proc to read memory from another user. 
5001 * 5002 * Another deadlock between SOFTLOCK and file truncation can happen 5003 * because segvn_fault_vnodepages() calls the FS one pagesize at 5004 * a time. A second VOP_GETPAGE() call by segvn_fault_vnodepages() 5005 * can cause a deadlock because the first set of page_t's remain 5006 * locked SE_SHARED. To avoid this, we demote segments on a first 5007 * SOFTLOCK if they have a length greater than the segment's 5008 * page size. 5009 * 5010 * So for now, we only avoid demoting a segment on a SOFTLOCK when 5011 * the access type is S_READ_NOCOW and the fault length is less than 5012 * or equal to the segment's page size. While this is quite restrictive, 5013 * it should be the most common case of SOFTLOCK against a vmpss 5014 * segment. 5015 * 5016 * For S_READ_NOCOW, it's safe not to do a copy on write because the 5017 * caller makes sure no COW will be caused by another thread for a 5018 * softlocked page. 5019 */ 5020 if (type == F_SOFTLOCK && svd->vp != NULL && seg->s_szc != 0) { 5021 int demote = 0; 5022 5023 if (rw != S_READ_NOCOW) { 5024 demote = 1; 5025 } 5026 if (!demote && len > PAGESIZE) { 5027 pgsz = page_get_pagesize(seg->s_szc); 5028 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, 5029 lpgeaddr); 5030 if (lpgeaddr - lpgaddr > pgsz) { 5031 demote = 1; 5032 } 5033 } 5034 5035 ASSERT(demote || AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 5036 5037 if (demote) { 5038 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5039 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5040 if (seg->s_szc != 0) { 5041 segvn_vmpss_clrszc_cnt++; 5042 ASSERT(svd->softlockcnt == 0); 5043 err = segvn_clrszc(seg); 5044 if (err) { 5045 segvn_vmpss_clrszc_err++; 5046 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5047 return (FC_MAKE_ERR(err)); 5048 } 5049 } 5050 ASSERT(seg->s_szc == 0); 5051 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5052 goto top; 5053 } 5054 } 5055 5056 /* 5057 * Check to see if we need to allocate an anon_map structure. 5058 */ 5059 if (svd->amp == NULL && (svd->vp == NULL || brkcow)) { 5060 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 5061 /* 5062 * Drop the "read" lock on the segment and acquire 5063 * the "write" version since we have to allocate the 5064 * anon_map. 5065 */ 5066 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5067 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5068 5069 if (svd->amp == NULL) { 5070 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 5071 svd->amp->a_szc = seg->s_szc; 5072 } 5073 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5074 5075 /* 5076 * Start all over again since segment protections 5077 * may have changed after we dropped the "read" lock. 5078 */ 5079 goto top; 5080 } 5081 5082 /* 5083 * S_READ_NOCOW vs S_READ distinction was 5084 * only needed for the code above. After 5085 * that we treat it as S_READ. 5086 */ 5087 if (rw == S_READ_NOCOW) { 5088 ASSERT(type == F_SOFTLOCK); 5089 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 5090 rw = S_READ; 5091 } 5092 5093 amp = svd->amp; 5094 5095 /* 5096 * MADV_SEQUENTIAL work is ignored for large page segments. 
5097 */ 5098 if (seg->s_szc != 0) { 5099 pgsz = page_get_pagesize(seg->s_szc); 5100 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 5101 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 5102 if (svd->vp == NULL) { 5103 err = segvn_fault_anonpages(hat, seg, lpgaddr, 5104 lpgeaddr, type, rw, addr, addr + len, brkcow); 5105 } else { 5106 err = segvn_fault_vnodepages(hat, seg, lpgaddr, 5107 lpgeaddr, type, rw, addr, addr + len, brkcow); 5108 if (err == IE_RETRY) { 5109 ASSERT(seg->s_szc == 0); 5110 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock)); 5111 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5112 goto top; 5113 } 5114 } 5115 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5116 return (err); 5117 } 5118 5119 page = seg_page(seg, addr); 5120 if (amp != NULL) { 5121 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 5122 anon_index = svd->anon_index + page; 5123 5124 if (type == F_PROT && rw == S_READ && 5125 svd->tr_state == SEGVN_TR_OFF && 5126 svd->type == MAP_PRIVATE && svd->pageprot == 0) { 5127 size_t index = anon_index; 5128 struct anon *ap; 5129 5130 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5131 /* 5132 * The fast path could apply to S_WRITE also, except 5133 * that the protection fault could be caused by lazy 5134 * tlb flush when ro->rw. In this case, the pte is 5135 * RW already. But RO in the other cpu's tlb causes 5136 * the fault. Since hat_chgprot won't do anything if 5137 * pte doesn't change, we may end up faulting 5138 * indefinitely until the RO tlb entry gets replaced. 5139 */ 5140 for (a = addr; a < addr + len; a += PAGESIZE, index++) { 5141 anon_array_enter(amp, index, &cookie); 5142 ap = anon_get_ptr(amp->ahp, index); 5143 anon_array_exit(&cookie); 5144 if ((ap == NULL) || (ap->an_refcnt != 1)) { 5145 ANON_LOCK_EXIT(&->a_rwlock); 5146 goto slow; 5147 } 5148 } 5149 hat_chgprot(seg->s_as->a_hat, addr, len, svd->prot); 5150 ANON_LOCK_EXIT(&->a_rwlock); 5151 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5152 return (0); 5153 } 5154 } 5155 slow: 5156 5157 if (svd->vpage == NULL) 5158 vpage = NULL; 5159 else 5160 vpage = &svd->vpage[page]; 5161 5162 off = svd->offset + (uintptr_t)(addr - seg->s_base); 5163 5164 /* 5165 * If MADV_SEQUENTIAL has been set for the particular page we 5166 * are faulting on, free behind all pages in the segment and put 5167 * them on the free list. 
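 * The loop below walks backwards from the faulting page and pushes
 * eligible pages out with an asynchronous B_DONTNEED|B_FREE
 * VOP_PUTPAGE.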
5168 */ 5169 5170 if ((page != 0) && fltadvice && svd->tr_state != SEGVN_TR_ON) { 5171 struct vpage *vpp; 5172 ulong_t fanon_index; 5173 size_t fpage; 5174 u_offset_t pgoff, fpgoff; 5175 struct vnode *fvp; 5176 struct anon *fap = NULL; 5177 5178 if (svd->advice == MADV_SEQUENTIAL || 5179 (svd->pageadvice && 5180 VPP_ADVICE(vpage) == MADV_SEQUENTIAL)) { 5181 pgoff = off - PAGESIZE; 5182 fpage = page - 1; 5183 if (vpage != NULL) 5184 vpp = &svd->vpage[fpage]; 5185 if (amp != NULL) 5186 fanon_index = svd->anon_index + fpage; 5187 5188 while (pgoff > svd->offset) { 5189 if (svd->advice != MADV_SEQUENTIAL && 5190 (!svd->pageadvice || (vpage && 5191 VPP_ADVICE(vpp) != MADV_SEQUENTIAL))) 5192 break; 5193 5194 /* 5195 * If this is an anon page, we must find the 5196 * correct <vp, offset> for it 5197 */ 5198 fap = NULL; 5199 if (amp != NULL) { 5200 ANON_LOCK_ENTER(&->a_rwlock, 5201 RW_READER); 5202 anon_array_enter(amp, fanon_index, 5203 &cookie); 5204 fap = anon_get_ptr(amp->ahp, 5205 fanon_index); 5206 if (fap != NULL) { 5207 swap_xlate(fap, &fvp, &fpgoff); 5208 } else { 5209 fpgoff = pgoff; 5210 fvp = svd->vp; 5211 } 5212 anon_array_exit(&cookie); 5213 ANON_LOCK_EXIT(&->a_rwlock); 5214 } else { 5215 fpgoff = pgoff; 5216 fvp = svd->vp; 5217 } 5218 if (fvp == NULL) 5219 break; /* XXX */ 5220 /* 5221 * Skip pages that are free or have an 5222 * "exclusive" lock. 5223 */ 5224 pp = page_lookup_nowait(fvp, fpgoff, SE_SHARED); 5225 if (pp == NULL) 5226 break; 5227 /* 5228 * We don't need the page_struct_lock to test 5229 * as this is only advisory; even if we 5230 * acquire it someone might race in and lock 5231 * the page after we unlock and before the 5232 * PUTPAGE, then VOP_PUTPAGE will do nothing. 5233 */ 5234 if (pp->p_lckcnt == 0 && pp->p_cowcnt == 0) { 5235 /* 5236 * Hold the vnode before releasing 5237 * the page lock to prevent it from 5238 * being freed and re-used by some 5239 * other thread. 5240 */ 5241 VN_HOLD(fvp); 5242 page_unlock(pp); 5243 /* 5244 * We should build a page list 5245 * to kluster putpages XXX 5246 */ 5247 (void) VOP_PUTPAGE(fvp, 5248 (offset_t)fpgoff, PAGESIZE, 5249 (B_DONTNEED|B_FREE|B_ASYNC), 5250 svd->cred, NULL); 5251 VN_RELE(fvp); 5252 } else { 5253 /* 5254 * XXX - Should the loop terminate if 5255 * the page is `locked'? 5256 */ 5257 page_unlock(pp); 5258 } 5259 --vpp; 5260 --fanon_index; 5261 pgoff -= PAGESIZE; 5262 } 5263 } 5264 } 5265 5266 plp = pl; 5267 *plp = NULL; 5268 pl_alloc_sz = 0; 5269 5270 /* 5271 * See if we need to call VOP_GETPAGE for 5272 * *any* of the range being faulted on. 5273 * We can skip all of this work if there 5274 * was no original vnode. 5275 */ 5276 if (svd->vp != NULL) { 5277 u_offset_t vp_off; 5278 size_t vp_len; 5279 struct anon *ap; 5280 vnode_t *vp; 5281 5282 vp_off = off; 5283 vp_len = len; 5284 5285 if (amp == NULL) 5286 dogetpage = 1; 5287 else { 5288 /* 5289 * Only acquire reader lock to prevent amp->ahp 5290 * from being changed. 
It's ok to miss pages, 5291 * hence we don't do anon_array_enter 5292 */ 5293 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5294 ap = anon_get_ptr(amp->ahp, anon_index); 5295 5296 if (len <= PAGESIZE) 5297 /* inline non_anon() */ 5298 dogetpage = (ap == NULL); 5299 else 5300 dogetpage = non_anon(amp->ahp, anon_index, 5301 &vp_off, &vp_len); 5302 ANON_LOCK_EXIT(&->a_rwlock); 5303 } 5304 5305 if (dogetpage) { 5306 enum seg_rw arw; 5307 struct as *as = seg->s_as; 5308 5309 if (len > ptob((sizeof (pl) / sizeof (pl[0])) - 1)) { 5310 /* 5311 * Page list won't fit in local array, 5312 * allocate one of the needed size. 5313 */ 5314 pl_alloc_sz = 5315 (btop(len) + 1) * sizeof (page_t *); 5316 plp = kmem_alloc(pl_alloc_sz, KM_SLEEP); 5317 plp[0] = NULL; 5318 plsz = len; 5319 } else if (rw == S_WRITE && svd->type == MAP_PRIVATE || 5320 svd->tr_state == SEGVN_TR_ON || rw == S_OTHER || 5321 (((size_t)(addr + PAGESIZE) < 5322 (size_t)(seg->s_base + seg->s_size)) && 5323 hat_probe(as->a_hat, addr + PAGESIZE))) { 5324 /* 5325 * Ask VOP_GETPAGE to return the exact number 5326 * of pages if 5327 * (a) this is a COW fault, or 5328 * (b) this is a software fault, or 5329 * (c) next page is already mapped. 5330 */ 5331 plsz = len; 5332 } else { 5333 /* 5334 * Ask VOP_GETPAGE to return adjacent pages 5335 * within the segment. 5336 */ 5337 plsz = MIN((size_t)PVN_GETPAGE_SZ, (size_t) 5338 ((seg->s_base + seg->s_size) - addr)); 5339 ASSERT((addr + plsz) <= 5340 (seg->s_base + seg->s_size)); 5341 } 5342 5343 /* 5344 * Need to get some non-anonymous pages. 5345 * We need to make only one call to GETPAGE to do 5346 * this to prevent certain deadlocking conditions 5347 * when we are doing locking. In this case 5348 * non_anon() should have picked up the smallest 5349 * range which includes all the non-anonymous 5350 * pages in the requested range. We have to 5351 * be careful regarding which rw flag to pass in 5352 * because on a private mapping, the underlying 5353 * object is never allowed to be written. 5354 */ 5355 if (rw == S_WRITE && svd->type == MAP_PRIVATE) { 5356 arw = S_READ; 5357 } else { 5358 arw = rw; 5359 } 5360 vp = svd->vp; 5361 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE, 5362 "segvn_getpage:seg %p addr %p vp %p", 5363 seg, addr, vp); 5364 err = VOP_GETPAGE(vp, (offset_t)vp_off, vp_len, 5365 &vpprot, plp, plsz, seg, addr + (vp_off - off), arw, 5366 svd->cred, NULL); 5367 if (err) { 5368 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5369 segvn_pagelist_rele(plp); 5370 if (pl_alloc_sz) 5371 kmem_free(plp, pl_alloc_sz); 5372 return (FC_MAKE_ERR(err)); 5373 } 5374 if (svd->type == MAP_PRIVATE) 5375 vpprot &= ~PROT_WRITE; 5376 } 5377 } 5378 5379 /* 5380 * N.B. at this time the plp array has all the needed non-anon 5381 * pages in addition to (possibly) having some adjacent pages. 5382 */ 5383 5384 /* 5385 * Always acquire the anon_array_lock to prevent 5386 * 2 threads from allocating separate anon slots for 5387 * the same "addr". 5388 * 5389 * If this is a copy-on-write fault and we don't already 5390 * have the anon_array_lock, acquire it to prevent the 5391 * fault routine from handling multiple copy-on-write faults 5392 * on the same "addr" in the same address space. 5393 * 5394 * Only one thread should deal with the fault since after 5395 * it is handled, the other threads can acquire a translation 5396 * to the newly created private page. This prevents two or 5397 * more threads from creating different private pages for the 5398 * same fault. 
5399 * 5400 * We grab "serialization" lock here if this is a MAP_PRIVATE segment 5401 * to prevent deadlock between this thread and another thread 5402 * which has soft-locked this page and wants to acquire serial_lock. 5403 * ( bug 4026339 ) 5404 * 5405 * The fix for bug 4026339 becomes unnecessary when using the 5406 * locking scheme with per amp rwlock and a global set of hash 5407 * lock, anon_array_lock. If we steal a vnode page when low 5408 * on memory and upgrad the page lock through page_rename, 5409 * then the page is PAGE_HANDLED, nothing needs to be done 5410 * for this page after returning from segvn_faultpage. 5411 * 5412 * But really, the page lock should be downgraded after 5413 * the stolen page is page_rename'd. 5414 */ 5415 5416 if (amp != NULL) 5417 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5418 5419 /* 5420 * Ok, now loop over the address range and handle faults 5421 */ 5422 for (a = addr; a < addr + len; a += PAGESIZE, off += PAGESIZE) { 5423 err = segvn_faultpage(hat, seg, a, off, vpage, plp, vpprot, 5424 type, rw, brkcow); 5425 if (err) { 5426 if (amp != NULL) 5427 ANON_LOCK_EXIT(&->a_rwlock); 5428 if (type == F_SOFTLOCK && a > addr) { 5429 segvn_softunlock(seg, addr, (a - addr), 5430 S_OTHER); 5431 } 5432 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5433 segvn_pagelist_rele(plp); 5434 if (pl_alloc_sz) 5435 kmem_free(plp, pl_alloc_sz); 5436 return (err); 5437 } 5438 if (vpage) { 5439 vpage++; 5440 } else if (svd->vpage) { 5441 page = seg_page(seg, addr); 5442 vpage = &svd->vpage[++page]; 5443 } 5444 } 5445 5446 /* Didn't get pages from the underlying fs so we're done */ 5447 if (!dogetpage) 5448 goto done; 5449 5450 /* 5451 * Now handle any other pages in the list returned. 5452 * If the page can be used, load up the translations now. 5453 * Note that the for loop will only be entered if "plp" 5454 * is pointing to a non-NULL page pointer which means that 5455 * VOP_GETPAGE() was called and vpprot has been initialized. 5456 */ 5457 if (svd->pageprot == 0) 5458 prot = svd->prot & vpprot; 5459 5460 5461 /* 5462 * Large Files: diff should be unsigned value because we started 5463 * supporting > 2GB segment sizes from 2.5.1 and when a 5464 * large file of size > 2GB gets mapped to address space 5465 * the diff value can be > 2GB. 5466 */ 5467 5468 for (ppp = plp; (pp = *ppp) != NULL; ppp++) { 5469 size_t diff; 5470 struct anon *ap; 5471 int anon_index; 5472 anon_sync_obj_t cookie; 5473 int hat_flag = HAT_LOAD_ADV; 5474 5475 if (svd->flags & MAP_TEXT) { 5476 hat_flag |= HAT_LOAD_TEXT; 5477 } 5478 5479 if (pp == PAGE_HANDLED) 5480 continue; 5481 5482 if (svd->tr_state != SEGVN_TR_ON && 5483 pp->p_offset >= svd->offset && 5484 pp->p_offset < svd->offset + seg->s_size) { 5485 5486 diff = pp->p_offset - svd->offset; 5487 5488 /* 5489 * Large Files: Following is the assertion 5490 * validating the above cast. 5491 */ 5492 ASSERT(svd->vp == pp->p_vnode); 5493 5494 page = btop(diff); 5495 if (svd->pageprot) 5496 prot = VPP_PROT(&svd->vpage[page]) & vpprot; 5497 5498 /* 5499 * Prevent other threads in the address space from 5500 * creating private pages (i.e., allocating anon slots) 5501 * while we are in the process of loading translations 5502 * to additional pages returned by the underlying 5503 * object. 
5504 */ 5505 if (amp != NULL) { 5506 anon_index = svd->anon_index + page; 5507 anon_array_enter(amp, anon_index, &cookie); 5508 ap = anon_get_ptr(amp->ahp, anon_index); 5509 } 5510 if ((amp == NULL) || (ap == NULL)) { 5511 if (IS_VMODSORT(pp->p_vnode) || 5512 enable_mbit_wa) { 5513 if (rw == S_WRITE) 5514 hat_setmod(pp); 5515 else if (rw != S_OTHER && 5516 !hat_ismod(pp)) 5517 prot &= ~PROT_WRITE; 5518 } 5519 /* 5520 * Skip mapping read ahead pages marked 5521 * for migration, so they will get migrated 5522 * properly on fault 5523 */ 5524 ASSERT(amp == NULL || 5525 svd->rcookie == HAT_INVALID_REGION_COOKIE); 5526 if ((prot & PROT_READ) && !PP_ISMIGRATE(pp)) { 5527 hat_memload_region(hat, 5528 seg->s_base + diff, 5529 pp, prot, hat_flag, 5530 svd->rcookie); 5531 } 5532 } 5533 if (amp != NULL) 5534 anon_array_exit(&cookie); 5535 } 5536 page_unlock(pp); 5537 } 5538 done: 5539 if (amp != NULL) 5540 ANON_LOCK_EXIT(&->a_rwlock); 5541 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5542 if (pl_alloc_sz) 5543 kmem_free(plp, pl_alloc_sz); 5544 return (0); 5545 } 5546 5547 /* 5548 * This routine is used to start I/O on pages asynchronously. XXX it will 5549 * only create PAGESIZE pages. At fault time they will be relocated into 5550 * larger pages. 5551 */ 5552 static faultcode_t 5553 segvn_faulta(struct seg *seg, caddr_t addr) 5554 { 5555 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 5556 int err; 5557 struct anon_map *amp; 5558 vnode_t *vp; 5559 5560 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5561 5562 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 5563 if ((amp = svd->amp) != NULL) { 5564 struct anon *ap; 5565 5566 /* 5567 * Reader lock to prevent amp->ahp from being changed. 5568 * This is advisory, it's ok to miss a page, so 5569 * we don't do anon_array_enter lock. 
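 * If an anon slot already exists for this address, the read is
 * started through anon_getpage() below and the vnode is not used.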
5570 */ 5571 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5572 if ((ap = anon_get_ptr(amp->ahp, 5573 svd->anon_index + seg_page(seg, addr))) != NULL) { 5574 5575 err = anon_getpage(&ap, NULL, NULL, 5576 0, seg, addr, S_READ, svd->cred); 5577 5578 ANON_LOCK_EXIT(&->a_rwlock); 5579 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5580 if (err) 5581 return (FC_MAKE_ERR(err)); 5582 return (0); 5583 } 5584 ANON_LOCK_EXIT(&->a_rwlock); 5585 } 5586 5587 if (svd->vp == NULL) { 5588 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5589 return (0); /* zfod page - do nothing now */ 5590 } 5591 5592 vp = svd->vp; 5593 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE, 5594 "segvn_getpage:seg %p addr %p vp %p", seg, addr, vp); 5595 err = VOP_GETPAGE(vp, 5596 (offset_t)(svd->offset + (uintptr_t)(addr - seg->s_base)), 5597 PAGESIZE, NULL, NULL, 0, seg, addr, 5598 S_OTHER, svd->cred, NULL); 5599 5600 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5601 if (err) 5602 return (FC_MAKE_ERR(err)); 5603 return (0); 5604 } 5605 5606 static int 5607 segvn_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 5608 { 5609 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 5610 struct vpage *cvp, *svp, *evp; 5611 struct vnode *vp; 5612 size_t pgsz; 5613 pgcnt_t pgcnt; 5614 anon_sync_obj_t cookie; 5615 int unload_done = 0; 5616 5617 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5618 5619 if ((svd->maxprot & prot) != prot) 5620 return (EACCES); /* violated maxprot */ 5621 5622 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5623 5624 /* return if prot is the same */ 5625 if (!svd->pageprot && svd->prot == prot) { 5626 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5627 return (0); 5628 } 5629 5630 /* 5631 * Since we change protections we first have to flush the cache. 5632 * This makes sure all the pagelock calls have to recheck 5633 * protections. 5634 */ 5635 if (svd->softlockcnt > 0) { 5636 ASSERT(svd->tr_state == SEGVN_TR_OFF); 5637 5638 /* 5639 * If this is shared segment non 0 softlockcnt 5640 * means locked pages are still in use. 5641 */ 5642 if (svd->type == MAP_SHARED) { 5643 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5644 return (EAGAIN); 5645 } 5646 5647 /* 5648 * Since we do have the segvn writers lock nobody can fill 5649 * the cache with entries belonging to this seg during 5650 * the purge. The flush either succeeds or we still have 5651 * pending I/Os. 
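 * If softlockcnt is still nonzero after the purge below we return
 * EAGAIN so the caller can retry once the softlocked pages are
 * released.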
5652 */ 5653 segvn_purge(seg); 5654 if (svd->softlockcnt > 0) { 5655 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5656 return (EAGAIN); 5657 } 5658 } 5659 5660 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 5661 ASSERT(svd->amp == NULL); 5662 ASSERT(svd->tr_state == SEGVN_TR_OFF); 5663 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 5664 HAT_REGION_TEXT); 5665 svd->rcookie = HAT_INVALID_REGION_COOKIE; 5666 unload_done = 1; 5667 } else if (svd->tr_state == SEGVN_TR_INIT) { 5668 svd->tr_state = SEGVN_TR_OFF; 5669 } else if (svd->tr_state == SEGVN_TR_ON) { 5670 ASSERT(svd->amp != NULL); 5671 segvn_textunrepl(seg, 0); 5672 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 5673 unload_done = 1; 5674 } 5675 5676 if ((prot & PROT_WRITE) && svd->type == MAP_SHARED && 5677 svd->vp != NULL && (svd->vp->v_flag & VVMEXEC)) { 5678 ASSERT(vn_is_mapped(svd->vp, V_WRITE)); 5679 segvn_inval_trcache(svd->vp); 5680 } 5681 if (seg->s_szc != 0) { 5682 int err; 5683 pgsz = page_get_pagesize(seg->s_szc); 5684 pgcnt = pgsz >> PAGESHIFT; 5685 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt)); 5686 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) { 5687 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5688 ASSERT(seg->s_base != addr || seg->s_size != len); 5689 /* 5690 * If we are holding the as lock as a reader then 5691 * we need to return IE_RETRY and let the as 5692 * layer drop and re-acquire the lock as a writer. 5693 */ 5694 if (AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) 5695 return (IE_RETRY); 5696 VM_STAT_ADD(segvnvmstats.demoterange[1]); 5697 if (svd->type == MAP_PRIVATE || svd->vp != NULL) { 5698 err = segvn_demote_range(seg, addr, len, 5699 SDR_END, 0); 5700 } else { 5701 uint_t szcvec = map_pgszcvec(seg->s_base, 5702 pgsz, (uintptr_t)seg->s_base, 5703 (svd->flags & MAP_TEXT), MAPPGSZC_SHM, 0); 5704 err = segvn_demote_range(seg, addr, len, 5705 SDR_END, szcvec); 5706 } 5707 if (err == 0) 5708 return (IE_RETRY); 5709 if (err == ENOMEM) 5710 return (IE_NOMEM); 5711 return (err); 5712 } 5713 } 5714 5715 5716 /* 5717 * If it's a private mapping and we're making it writable then we 5718 * may have to reserve the additional swap space now. If we are 5719 * making writable only a part of the segment then we use its vpage 5720 * array to keep a record of the pages for which we have reserved 5721 * swap. In this case we set the pageswap field in the segment's 5722 * segvn structure to record this. 5723 * 5724 * If it's a private mapping to a file (i.e., vp != NULL) and we're 5725 * removing write permission on the entire segment and we haven't 5726 * modified any pages, we can release the swap space. 5727 */ 5728 if (svd->type == MAP_PRIVATE) { 5729 if (prot & PROT_WRITE) { 5730 if (!(svd->flags & MAP_NORESERVE) && 5731 !(svd->swresv && svd->pageswap == 0)) { 5732 size_t sz = 0; 5733 5734 /* 5735 * Start by determining how much swap 5736 * space is required. 5737 */ 5738 if (addr == seg->s_base && 5739 len == seg->s_size && 5740 svd->pageswap == 0) { 5741 /* The whole segment */ 5742 sz = seg->s_size; 5743 } else { 5744 /* 5745 * Make sure that the vpage array 5746 * exists, and make a note of the 5747 * range of elements corresponding 5748 * to len. 5749 */ 5750 segvn_vpage(seg); 5751 svp = &svd->vpage[seg_page(seg, addr)]; 5752 evp = &svd->vpage[seg_page(seg, 5753 addr + len)]; 5754 5755 if (svd->pageswap == 0) { 5756 /* 5757 * This is the first time we've 5758 * asked for a part of this 5759 * segment, so we need to 5760 * reserve everything we've 5761 * been asked for. 
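 * (i.e. sz covers the whole request; later partial requests only
 * count the pages for which VPP_ISSWAPRES is not yet set.)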
5762 */ 5763 sz = len; 5764 } else { 5765 /* 5766 * We have to count the number 5767 * of pages required. 5768 */ 5769 for (cvp = svp; cvp < evp; 5770 cvp++) { 5771 if (!VPP_ISSWAPRES(cvp)) 5772 sz++; 5773 } 5774 sz <<= PAGESHIFT; 5775 } 5776 } 5777 5778 /* Try to reserve the necessary swap. */ 5779 if (anon_resv_zone(sz, 5780 seg->s_as->a_proc->p_zone) == 0) { 5781 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5782 return (IE_NOMEM); 5783 } 5784 5785 /* 5786 * Make a note of how much swap space 5787 * we've reserved. 5788 */ 5789 if (svd->pageswap == 0 && sz == seg->s_size) { 5790 svd->swresv = sz; 5791 } else { 5792 ASSERT(svd->vpage != NULL); 5793 svd->swresv += sz; 5794 svd->pageswap = 1; 5795 for (cvp = svp; cvp < evp; cvp++) { 5796 if (!VPP_ISSWAPRES(cvp)) 5797 VPP_SETSWAPRES(cvp); 5798 } 5799 } 5800 } 5801 } else { 5802 /* 5803 * Swap space is released only if this segment 5804 * does not map anonymous memory, since read faults 5805 * on such segments still need an anon slot to read 5806 * in the data. 5807 */ 5808 if (svd->swresv != 0 && svd->vp != NULL && 5809 svd->amp == NULL && addr == seg->s_base && 5810 len == seg->s_size && svd->pageprot == 0) { 5811 ASSERT(svd->pageswap == 0); 5812 anon_unresv_zone(svd->swresv, 5813 seg->s_as->a_proc->p_zone); 5814 svd->swresv = 0; 5815 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 5816 "anon proc:%p %lu %u", seg, 0, 0); 5817 } 5818 } 5819 } 5820 5821 if (addr == seg->s_base && len == seg->s_size && svd->vpage == NULL) { 5822 if (svd->prot == prot) { 5823 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5824 return (0); /* all done */ 5825 } 5826 svd->prot = (uchar_t)prot; 5827 } else if (svd->type == MAP_PRIVATE) { 5828 struct anon *ap = NULL; 5829 page_t *pp; 5830 u_offset_t offset, off; 5831 struct anon_map *amp; 5832 ulong_t anon_idx = 0; 5833 5834 /* 5835 * A vpage structure exists or else the change does not 5836 * involve the entire segment. Establish a vpage structure 5837 * if none is there. Then, for each page in the range, 5838 * adjust its individual permissions. Note that write- 5839 * enabling a MAP_PRIVATE page can affect the claims for 5840 * locked down memory. Overcommitting memory terminates 5841 * the operation. 5842 */ 5843 segvn_vpage(seg); 5844 svd->pageprot = 1; 5845 if ((amp = svd->amp) != NULL) { 5846 anon_idx = svd->anon_index + seg_page(seg, addr); 5847 ASSERT(seg->s_szc == 0 || 5848 IS_P2ALIGNED(anon_idx, pgcnt)); 5849 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5850 } 5851 5852 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 5853 evp = &svd->vpage[seg_page(seg, addr + len)]; 5854 5855 /* 5856 * See Statement at the beginning of segvn_lockop regarding 5857 * the way cowcnts and lckcnts are handled. 
5858 */ 5859 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) { 5860 5861 if (seg->s_szc != 0) { 5862 if (amp != NULL) { 5863 anon_array_enter(amp, anon_idx, 5864 &cookie); 5865 } 5866 if (IS_P2ALIGNED(anon_idx, pgcnt) && 5867 !segvn_claim_pages(seg, svp, offset, 5868 anon_idx, prot)) { 5869 if (amp != NULL) { 5870 anon_array_exit(&cookie); 5871 } 5872 break; 5873 } 5874 if (amp != NULL) { 5875 anon_array_exit(&cookie); 5876 } 5877 anon_idx++; 5878 } else { 5879 if (amp != NULL) { 5880 anon_array_enter(amp, anon_idx, 5881 &cookie); 5882 ap = anon_get_ptr(amp->ahp, anon_idx++); 5883 } 5884 5885 if (VPP_ISPPLOCK(svp) && 5886 VPP_PROT(svp) != prot) { 5887 5888 if (amp == NULL || ap == NULL) { 5889 vp = svd->vp; 5890 off = offset; 5891 } else 5892 swap_xlate(ap, &vp, &off); 5893 if (amp != NULL) 5894 anon_array_exit(&cookie); 5895 5896 if ((pp = page_lookup(vp, off, 5897 SE_SHARED)) == NULL) { 5898 panic("segvn_setprot: no page"); 5899 /*NOTREACHED*/ 5900 } 5901 ASSERT(seg->s_szc == 0); 5902 if ((VPP_PROT(svp) ^ prot) & 5903 PROT_WRITE) { 5904 if (prot & PROT_WRITE) { 5905 if (!page_addclaim( 5906 pp)) { 5907 page_unlock(pp); 5908 break; 5909 } 5910 } else { 5911 if (!page_subclaim( 5912 pp)) { 5913 page_unlock(pp); 5914 break; 5915 } 5916 } 5917 } 5918 page_unlock(pp); 5919 } else if (amp != NULL) 5920 anon_array_exit(&cookie); 5921 } 5922 VPP_SETPROT(svp, prot); 5923 offset += PAGESIZE; 5924 } 5925 if (amp != NULL) 5926 ANON_LOCK_EXIT(&->a_rwlock); 5927 5928 /* 5929 * Did we terminate prematurely? If so, simply unload 5930 * the translations to the things we've updated so far. 5931 */ 5932 if (svp != evp) { 5933 if (unload_done) { 5934 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5935 return (IE_NOMEM); 5936 } 5937 len = (svp - &svd->vpage[seg_page(seg, addr)]) * 5938 PAGESIZE; 5939 ASSERT(seg->s_szc == 0 || IS_P2ALIGNED(len, pgsz)); 5940 if (len != 0) 5941 hat_unload(seg->s_as->a_hat, addr, 5942 len, HAT_UNLOAD); 5943 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5944 return (IE_NOMEM); 5945 } 5946 } else { 5947 segvn_vpage(seg); 5948 svd->pageprot = 1; 5949 evp = &svd->vpage[seg_page(seg, addr + len)]; 5950 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) { 5951 VPP_SETPROT(svp, prot); 5952 } 5953 } 5954 5955 if (unload_done) { 5956 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5957 return (0); 5958 } 5959 5960 if (((prot & PROT_WRITE) != 0 && 5961 (svd->vp != NULL || svd->type == MAP_PRIVATE)) || 5962 (prot & ~PROT_USER) == PROT_NONE) { 5963 /* 5964 * Either private or shared data with write access (in 5965 * which case we need to throw out all former translations 5966 * so that we get the right translations set up on fault 5967 * and we don't allow write access to any copy-on-write pages 5968 * that might be around or to prevent write access to pages 5969 * representing holes in a file), or we don't have permission 5970 * to access the memory at all (in which case we have to 5971 * unload any current translations that might exist). 5972 */ 5973 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD); 5974 } else { 5975 /* 5976 * A shared mapping or a private mapping in which write 5977 * protection is going to be denied - just change all the 5978 * protections over the range of addresses in question. 5979 * segvn does not support any other attributes other 5980 * than prot so we can use hat_chgattr. 
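 * Nothing has to be unloaded in this case: hat_chgattr()
 * simply applies the new protections to whatever
 * translations already exist for the range, and later
 * faults pick them up from the segment as usual.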
5981 */ 5982 hat_chgattr(seg->s_as->a_hat, addr, len, prot); 5983 } 5984 5985 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5986 5987 return (0); 5988 } 5989 5990 /* 5991 * segvn_setpagesize is called via SEGOP_SETPAGESIZE from as_setpagesize, 5992 * to determine if the seg is capable of mapping the requested szc. 5993 */ 5994 static int 5995 segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc) 5996 { 5997 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 5998 struct segvn_data *nsvd; 5999 struct anon_map *amp = svd->amp; 6000 struct seg *nseg; 6001 caddr_t eaddr = addr + len, a; 6002 size_t pgsz = page_get_pagesize(szc); 6003 pgcnt_t pgcnt = page_get_pagecnt(szc); 6004 int err; 6005 u_offset_t off = svd->offset + (uintptr_t)(addr - seg->s_base); 6006 extern struct vnode kvp; 6007 6008 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 6009 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size); 6010 6011 if (seg->s_szc == szc || segvn_lpg_disable != 0) { 6012 return (0); 6013 } 6014 6015 /* 6016 * addr should always be pgsz aligned but eaddr may be misaligned if 6017 * it's at the end of the segment. 6018 * 6019 * XXX we should assert this condition since as_setpagesize() logic 6020 * guarantees it. 6021 */ 6022 if (!IS_P2ALIGNED(addr, pgsz) || 6023 (!IS_P2ALIGNED(eaddr, pgsz) && 6024 eaddr != seg->s_base + seg->s_size)) { 6025 6026 segvn_setpgsz_align_err++; 6027 return (EINVAL); 6028 } 6029 6030 if (amp != NULL && svd->type == MAP_SHARED) { 6031 ulong_t an_idx = svd->anon_index + seg_page(seg, addr); 6032 if (!IS_P2ALIGNED(an_idx, pgcnt)) { 6033 6034 segvn_setpgsz_anon_align_err++; 6035 return (EINVAL); 6036 } 6037 } 6038 6039 if ((svd->flags & MAP_NORESERVE) || seg->s_as == &kas || 6040 szc > segvn_maxpgszc) { 6041 return (EINVAL); 6042 } 6043 6044 /* paranoid check */ 6045 if (svd->vp != NULL && 6046 (IS_SWAPFSVP(svd->vp) || VN_ISKAS(svd->vp))) { 6047 return (EINVAL); 6048 } 6049 6050 if (seg->s_szc == 0 && svd->vp != NULL && 6051 map_addr_vacalign_check(addr, off)) { 6052 return (EINVAL); 6053 } 6054 6055 /* 6056 * Check that protections are the same within new page 6057 * size boundaries. 6058 */ 6059 if (svd->pageprot) { 6060 for (a = addr; a < eaddr; a += pgsz) { 6061 if ((a + pgsz) > eaddr) { 6062 if (!sameprot(seg, a, eaddr - a)) { 6063 return (EINVAL); 6064 } 6065 } else { 6066 if (!sameprot(seg, a, pgsz)) { 6067 return (EINVAL); 6068 } 6069 } 6070 } 6071 } 6072 6073 /* 6074 * Since we are changing page size we first have to flush 6075 * the cache. This makes sure all the pagelock calls have 6076 * to recheck protections. 6077 */ 6078 if (svd->softlockcnt > 0) { 6079 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6080 6081 /* 6082 * If this is shared segment non 0 softlockcnt 6083 * means locked pages are still in use. 6084 */ 6085 if (svd->type == MAP_SHARED) { 6086 return (EAGAIN); 6087 } 6088 6089 /* 6090 * Since we do have the segvn writers lock nobody can fill 6091 * the cache with entries belonging to this seg during 6092 * the purge. The flush either succeeds or we still have 6093 * pending I/Os. 
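 * If softlockcnt is still nonzero after the purge,
 * pagelocked pages (pending I/O) are still outstanding
 * and the page size change is refused with EAGAIN.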
6094 */ 6095 segvn_purge(seg); 6096 if (svd->softlockcnt > 0) { 6097 return (EAGAIN); 6098 } 6099 } 6100 6101 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 6102 ASSERT(svd->amp == NULL); 6103 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6104 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 6105 HAT_REGION_TEXT); 6106 svd->rcookie = HAT_INVALID_REGION_COOKIE; 6107 } else if (svd->tr_state == SEGVN_TR_INIT) { 6108 svd->tr_state = SEGVN_TR_OFF; 6109 } else if (svd->tr_state == SEGVN_TR_ON) { 6110 ASSERT(svd->amp != NULL); 6111 segvn_textunrepl(seg, 1); 6112 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 6113 amp = NULL; 6114 } 6115 6116 /* 6117 * Operation for sub range of existing segment. 6118 */ 6119 if (addr != seg->s_base || eaddr != (seg->s_base + seg->s_size)) { 6120 if (szc < seg->s_szc) { 6121 VM_STAT_ADD(segvnvmstats.demoterange[2]); 6122 err = segvn_demote_range(seg, addr, len, SDR_RANGE, 0); 6123 if (err == 0) { 6124 return (IE_RETRY); 6125 } 6126 if (err == ENOMEM) { 6127 return (IE_NOMEM); 6128 } 6129 return (err); 6130 } 6131 if (addr != seg->s_base) { 6132 nseg = segvn_split_seg(seg, addr); 6133 if (eaddr != (nseg->s_base + nseg->s_size)) { 6134 /* eaddr is szc aligned */ 6135 (void) segvn_split_seg(nseg, eaddr); 6136 } 6137 return (IE_RETRY); 6138 } 6139 if (eaddr != (seg->s_base + seg->s_size)) { 6140 /* eaddr is szc aligned */ 6141 (void) segvn_split_seg(seg, eaddr); 6142 } 6143 return (IE_RETRY); 6144 } 6145 6146 /* 6147 * Break any low level sharing and reset seg->s_szc to 0. 6148 */ 6149 if ((err = segvn_clrszc(seg)) != 0) { 6150 if (err == ENOMEM) { 6151 err = IE_NOMEM; 6152 } 6153 return (err); 6154 } 6155 ASSERT(seg->s_szc == 0); 6156 6157 /* 6158 * If the end of the current segment is not pgsz aligned 6159 * then attempt to concatenate with the next segment. 6160 */ 6161 if (!IS_P2ALIGNED(eaddr, pgsz)) { 6162 nseg = AS_SEGNEXT(seg->s_as, seg); 6163 if (nseg == NULL || nseg == seg || eaddr != nseg->s_base) { 6164 return (ENOMEM); 6165 } 6166 if (nseg->s_ops != &segvn_ops) { 6167 return (EINVAL); 6168 } 6169 nsvd = (struct segvn_data *)nseg->s_data; 6170 if (nsvd->softlockcnt > 0) { 6171 /* 6172 * If this is shared segment non 0 softlockcnt 6173 * means locked pages are still in use. 6174 */ 6175 if (nsvd->type == MAP_SHARED) { 6176 return (EAGAIN); 6177 } 6178 segvn_purge(nseg); 6179 if (nsvd->softlockcnt > 0) { 6180 return (EAGAIN); 6181 } 6182 } 6183 err = segvn_clrszc(nseg); 6184 if (err == ENOMEM) { 6185 err = IE_NOMEM; 6186 } 6187 if (err != 0) { 6188 return (err); 6189 } 6190 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE); 6191 err = segvn_concat(seg, nseg, 1); 6192 if (err == -1) { 6193 return (EINVAL); 6194 } 6195 if (err == -2) { 6196 return (IE_NOMEM); 6197 } 6198 return (IE_RETRY); 6199 } 6200 6201 /* 6202 * May need to re-align anon array to 6203 * new szc. 
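 * A large-page mapping needs svd->anon_index to be
 * aligned to the new page count so that anon slots line
 * up with large page boundaries; if it is not, the slots
 * are copied into a fresh anon header starting at index 0.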
6204 */
6205 if (amp != NULL) {
6206 if (!IS_P2ALIGNED(svd->anon_index, pgcnt)) {
6207 struct anon_hdr *nahp;
6208
6209 ASSERT(svd->type == MAP_PRIVATE);
6210
6211 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6212 ASSERT(amp->refcnt == 1);
6213 nahp = anon_create(btop(amp->size), ANON_NOSLEEP);
6214 if (nahp == NULL) {
6215 ANON_LOCK_EXIT(&amp->a_rwlock);
6216 return (IE_NOMEM);
6217 }
6218 if (anon_copy_ptr(amp->ahp, svd->anon_index,
6219 nahp, 0, btop(seg->s_size), ANON_NOSLEEP)) {
6220 anon_release(nahp, btop(amp->size));
6221 ANON_LOCK_EXIT(&amp->a_rwlock);
6222 return (IE_NOMEM);
6223 }
6224 anon_release(amp->ahp, btop(amp->size));
6225 amp->ahp = nahp;
6226 svd->anon_index = 0;
6227 ANON_LOCK_EXIT(&amp->a_rwlock);
6228 }
6229 }
6230 if (svd->vp != NULL && szc != 0) {
6231 struct vattr va;
6232 u_offset_t eoffpage = svd->offset;
6233 va.va_mask = AT_SIZE;
6234 eoffpage += seg->s_size;
6235 eoffpage = btopr(eoffpage);
6236 if (VOP_GETATTR(svd->vp, &va, 0, svd->cred, NULL) != 0) {
6237 segvn_setpgsz_getattr_err++;
6238 return (EINVAL);
6239 }
6240 if (btopr(va.va_size) < eoffpage) {
6241 segvn_setpgsz_eof_err++;
6242 return (EINVAL);
6243 }
6244 if (amp != NULL) {
6245 /*
6246 * anon_fill_cow_holes() may call VOP_GETPAGE().
6247 * don't take anon map lock here to avoid holding it
6248 * across VOP_GETPAGE() calls that may call back into
6249 * segvn for klustering checks. We don't really need
6250 * anon map lock here since it's a private segment and
6251 * we hold as level lock as writers.
6252 */
6253 if ((err = anon_fill_cow_holes(seg, seg->s_base,
6254 amp->ahp, svd->anon_index, svd->vp, svd->offset,
6255 seg->s_size, szc, svd->prot, svd->vpage,
6256 svd->cred)) != 0) {
6257 return (EINVAL);
6258 }
6259 }
6260 segvn_setvnode_mpss(svd->vp);
6261 }
6262
6263 if (amp != NULL) {
6264 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6265 if (svd->type == MAP_PRIVATE) {
6266 amp->a_szc = szc;
6267 } else if (szc > amp->a_szc) {
6268 amp->a_szc = szc;
6269 }
6270 ANON_LOCK_EXIT(&amp->a_rwlock);
6271 }
6272
6273 seg->s_szc = szc;
6274
6275 return (0);
6276 }
6277
6278 static int
6279 segvn_clrszc(struct seg *seg)
6280 {
6281 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6282 struct anon_map *amp = svd->amp;
6283 size_t pgsz;
6284 pgcnt_t pages;
6285 int err = 0;
6286 caddr_t a = seg->s_base;
6287 caddr_t ea = a + seg->s_size;
6288 ulong_t an_idx = svd->anon_index;
6289 vnode_t *vp = svd->vp;
6290 struct vpage *vpage = svd->vpage;
6291 page_t *anon_pl[1 + 1], *pp;
6292 struct anon *ap, *oldap;
6293 uint_t prot = svd->prot, vpprot;
6294 int pageflag = 0;
6295
6296 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
6297 SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
6298 ASSERT(svd->softlockcnt == 0);
6299
6300 if (vp == NULL && amp == NULL) {
6301 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6302 seg->s_szc = 0;
6303 return (0);
6304 }
6305
6306 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
6307 ASSERT(svd->amp == NULL);
6308 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6309 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
6310 HAT_REGION_TEXT);
6311 svd->rcookie = HAT_INVALID_REGION_COOKIE;
6312 } else if (svd->tr_state == SEGVN_TR_ON) {
6313 ASSERT(svd->amp != NULL);
6314 segvn_textunrepl(seg, 1);
6315 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
6316 amp = NULL;
6317 } else {
6318 if (svd->tr_state != SEGVN_TR_OFF) {
6319 ASSERT(svd->tr_state == SEGVN_TR_INIT);
6320 svd->tr_state = SEGVN_TR_OFF;
6321 }
6322
6323 /*
6324 * do HAT_UNLOAD_UNMAP since we are changing the pagesize.
6325 * unload argument is 0 when we are freeing the segment 6326 * and unload was already done. 6327 */ 6328 hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size, 6329 HAT_UNLOAD_UNMAP); 6330 } 6331 6332 if (amp == NULL || svd->type == MAP_SHARED) { 6333 seg->s_szc = 0; 6334 return (0); 6335 } 6336 6337 pgsz = page_get_pagesize(seg->s_szc); 6338 pages = btop(pgsz); 6339 6340 /* 6341 * XXX anon rwlock is not really needed because this is a 6342 * private segment and we are writers. 6343 */ 6344 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 6345 6346 for (; a < ea; a += pgsz, an_idx += pages) { 6347 if ((oldap = anon_get_ptr(amp->ahp, an_idx)) != NULL) { 6348 ASSERT(vpage != NULL || svd->pageprot == 0); 6349 if (vpage != NULL) { 6350 ASSERT(sameprot(seg, a, pgsz)); 6351 prot = VPP_PROT(vpage); 6352 pageflag = VPP_ISPPLOCK(vpage) ? LOCK_PAGE : 0; 6353 } 6354 if (seg->s_szc != 0) { 6355 ASSERT(vp == NULL || anon_pages(amp->ahp, 6356 an_idx, pages) == pages); 6357 if ((err = anon_map_demotepages(amp, an_idx, 6358 seg, a, prot, vpage, svd->cred)) != 0) { 6359 goto out; 6360 } 6361 } else { 6362 if (oldap->an_refcnt == 1) { 6363 continue; 6364 } 6365 if ((err = anon_getpage(&oldap, &vpprot, 6366 anon_pl, PAGESIZE, seg, a, S_READ, 6367 svd->cred))) { 6368 goto out; 6369 } 6370 if ((pp = anon_private(&ap, seg, a, prot, 6371 anon_pl[0], pageflag, svd->cred)) == NULL) { 6372 err = ENOMEM; 6373 goto out; 6374 } 6375 anon_decref(oldap); 6376 (void) anon_set_ptr(amp->ahp, an_idx, ap, 6377 ANON_SLEEP); 6378 page_unlock(pp); 6379 } 6380 } 6381 vpage = (vpage == NULL) ? NULL : vpage + pages; 6382 } 6383 6384 amp->a_szc = 0; 6385 seg->s_szc = 0; 6386 out: 6387 ANON_LOCK_EXIT(&->a_rwlock); 6388 return (err); 6389 } 6390 6391 static int 6392 segvn_claim_pages( 6393 struct seg *seg, 6394 struct vpage *svp, 6395 u_offset_t off, 6396 ulong_t anon_idx, 6397 uint_t prot) 6398 { 6399 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc); 6400 size_t ppasize = (pgcnt + 1) * sizeof (page_t *); 6401 page_t **ppa; 6402 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6403 struct anon_map *amp = svd->amp; 6404 struct vpage *evp = svp + pgcnt; 6405 caddr_t addr = ((uintptr_t)(svp - svd->vpage) << PAGESHIFT) 6406 + seg->s_base; 6407 struct anon *ap; 6408 struct vnode *vp = svd->vp; 6409 page_t *pp; 6410 pgcnt_t pg_idx, i; 6411 int err = 0; 6412 anoff_t aoff; 6413 int anon = (amp != NULL) ? 
1 : 0; 6414 6415 ASSERT(svd->type == MAP_PRIVATE); 6416 ASSERT(svd->vpage != NULL); 6417 ASSERT(seg->s_szc != 0); 6418 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt)); 6419 ASSERT(amp == NULL || IS_P2ALIGNED(anon_idx, pgcnt)); 6420 ASSERT(sameprot(seg, addr, pgcnt << PAGESHIFT)); 6421 6422 if (VPP_PROT(svp) == prot) 6423 return (1); 6424 if (!((VPP_PROT(svp) ^ prot) & PROT_WRITE)) 6425 return (1); 6426 6427 ppa = kmem_alloc(ppasize, KM_SLEEP); 6428 if (anon && vp != NULL) { 6429 if (anon_get_ptr(amp->ahp, anon_idx) == NULL) { 6430 anon = 0; 6431 ASSERT(!anon_pages(amp->ahp, anon_idx, pgcnt)); 6432 } 6433 ASSERT(!anon || 6434 anon_pages(amp->ahp, anon_idx, pgcnt) == pgcnt); 6435 } 6436 6437 for (*ppa = NULL, pg_idx = 0; svp < evp; svp++, anon_idx++) { 6438 if (!VPP_ISPPLOCK(svp)) 6439 continue; 6440 if (anon) { 6441 ap = anon_get_ptr(amp->ahp, anon_idx); 6442 if (ap == NULL) { 6443 panic("segvn_claim_pages: no anon slot"); 6444 } 6445 swap_xlate(ap, &vp, &aoff); 6446 off = (u_offset_t)aoff; 6447 } 6448 ASSERT(vp != NULL); 6449 if ((pp = page_lookup(vp, 6450 (u_offset_t)off, SE_SHARED)) == NULL) { 6451 panic("segvn_claim_pages: no page"); 6452 } 6453 ppa[pg_idx++] = pp; 6454 off += PAGESIZE; 6455 } 6456 6457 if (ppa[0] == NULL) { 6458 kmem_free(ppa, ppasize); 6459 return (1); 6460 } 6461 6462 ASSERT(pg_idx <= pgcnt); 6463 ppa[pg_idx] = NULL; 6464 6465 if (prot & PROT_WRITE) 6466 err = page_addclaim_pages(ppa); 6467 else 6468 err = page_subclaim_pages(ppa); 6469 6470 for (i = 0; i < pg_idx; i++) { 6471 ASSERT(ppa[i] != NULL); 6472 page_unlock(ppa[i]); 6473 } 6474 6475 kmem_free(ppa, ppasize); 6476 return (err); 6477 } 6478 6479 /* 6480 * Returns right (upper address) segment if split occurred. 6481 * If the address is equal to the beginning or end of its segment it returns 6482 * the current segment. 6483 */ 6484 static struct seg * 6485 segvn_split_seg(struct seg *seg, caddr_t addr) 6486 { 6487 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6488 struct seg *nseg; 6489 size_t nsize; 6490 struct segvn_data *nsvd; 6491 6492 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 6493 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6494 6495 ASSERT(addr >= seg->s_base); 6496 ASSERT(addr <= seg->s_base + seg->s_size); 6497 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 6498 6499 if (addr == seg->s_base || addr == seg->s_base + seg->s_size) 6500 return (seg); 6501 6502 nsize = seg->s_base + seg->s_size - addr; 6503 seg->s_size = addr - seg->s_base; 6504 nseg = seg_alloc(seg->s_as, addr, nsize); 6505 ASSERT(nseg != NULL); 6506 nseg->s_ops = seg->s_ops; 6507 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 6508 nseg->s_data = (void *)nsvd; 6509 nseg->s_szc = seg->s_szc; 6510 *nsvd = *svd; 6511 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE); 6512 nsvd->seg = nseg; 6513 rw_init(&nsvd->lock, NULL, RW_DEFAULT, NULL); 6514 6515 if (nsvd->vp != NULL) { 6516 VN_HOLD(nsvd->vp); 6517 nsvd->offset = svd->offset + 6518 (uintptr_t)(nseg->s_base - seg->s_base); 6519 if (nsvd->type == MAP_SHARED) 6520 lgrp_shm_policy_init(NULL, nsvd->vp); 6521 } else { 6522 /* 6523 * The offset for an anonymous segment has no signifigance in 6524 * terms of an offset into a file. 
If we were to use the above 6525 * calculation instead, the structures read out of 6526 * /proc/<pid>/xmap would be more difficult to decipher since 6527 * it would be unclear whether two seemingly contiguous 6528 * prxmap_t structures represented different segments or a 6529 * single segment that had been split up into multiple prxmap_t 6530 * structures (e.g. if some part of the segment had not yet 6531 * been faulted in). 6532 */ 6533 nsvd->offset = 0; 6534 } 6535 6536 ASSERT(svd->softlockcnt == 0); 6537 ASSERT(svd->softlockcnt_sbase == 0); 6538 ASSERT(svd->softlockcnt_send == 0); 6539 crhold(svd->cred); 6540 6541 if (svd->vpage != NULL) { 6542 size_t bytes = vpgtob(seg_pages(seg)); 6543 size_t nbytes = vpgtob(seg_pages(nseg)); 6544 struct vpage *ovpage = svd->vpage; 6545 6546 svd->vpage = kmem_alloc(bytes, KM_SLEEP); 6547 bcopy(ovpage, svd->vpage, bytes); 6548 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP); 6549 bcopy(ovpage + seg_pages(seg), nsvd->vpage, nbytes); 6550 kmem_free(ovpage, bytes + nbytes); 6551 } 6552 if (svd->amp != NULL && svd->type == MAP_PRIVATE) { 6553 struct anon_map *oamp = svd->amp, *namp; 6554 struct anon_hdr *nahp; 6555 6556 ANON_LOCK_ENTER(&oamp->a_rwlock, RW_WRITER); 6557 ASSERT(oamp->refcnt == 1); 6558 nahp = anon_create(btop(seg->s_size), ANON_SLEEP); 6559 (void) anon_copy_ptr(oamp->ahp, svd->anon_index, 6560 nahp, 0, btop(seg->s_size), ANON_SLEEP); 6561 6562 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP); 6563 namp->a_szc = nseg->s_szc; 6564 (void) anon_copy_ptr(oamp->ahp, 6565 svd->anon_index + btop(seg->s_size), 6566 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP); 6567 anon_release(oamp->ahp, btop(oamp->size)); 6568 oamp->ahp = nahp; 6569 oamp->size = seg->s_size; 6570 svd->anon_index = 0; 6571 nsvd->amp = namp; 6572 nsvd->anon_index = 0; 6573 ANON_LOCK_EXIT(&oamp->a_rwlock); 6574 } else if (svd->amp != NULL) { 6575 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc); 6576 ASSERT(svd->amp == nsvd->amp); 6577 ASSERT(seg->s_szc <= svd->amp->a_szc); 6578 nsvd->anon_index = svd->anon_index + seg_pages(seg); 6579 ASSERT(IS_P2ALIGNED(nsvd->anon_index, pgcnt)); 6580 ANON_LOCK_ENTER(&svd->amp->a_rwlock, RW_WRITER); 6581 svd->amp->refcnt++; 6582 ANON_LOCK_EXIT(&svd->amp->a_rwlock); 6583 } 6584 6585 /* 6586 * Split the amount of swap reserved. 6587 */ 6588 if (svd->swresv) { 6589 /* 6590 * For MAP_NORESERVE, only allocate swap reserve for pages 6591 * being used. Other segments get enough to cover whole 6592 * segment. 6593 */ 6594 if (svd->flags & MAP_NORESERVE) { 6595 size_t oswresv; 6596 6597 ASSERT(svd->amp); 6598 oswresv = svd->swresv; 6599 svd->swresv = ptob(anon_pages(svd->amp->ahp, 6600 svd->anon_index, btop(seg->s_size))); 6601 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp, 6602 nsvd->anon_index, btop(nseg->s_size))); 6603 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 6604 } else { 6605 if (svd->pageswap) { 6606 svd->swresv = segvn_count_swap_by_vpages(seg); 6607 ASSERT(nsvd->swresv >= svd->swresv); 6608 nsvd->swresv -= svd->swresv; 6609 } else { 6610 ASSERT(svd->swresv == seg->s_size + 6611 nseg->s_size); 6612 svd->swresv = seg->s_size; 6613 nsvd->swresv = nseg->s_size; 6614 } 6615 } 6616 } 6617 6618 return (nseg); 6619 } 6620 6621 /* 6622 * called on memory operations (unmap, setprot, setpagesize) for a subset 6623 * of a large page segment to either demote the memory range (SDR_RANGE) 6624 * or the ends (SDR_END) by addr/len. 6625 * 6626 * returns 0 on success. returns errno, including ENOMEM, on failure. 
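 * SDR_RANGE splits off the large-page aligned region
 * covering [addr, addr + len) and demotes all of it to
 * szc 0.  SDR_END demotes only the large page(s) that
 * straddle addr and/or addr + len; for MAP_SHARED
 * callers a nonzero szcvec may leave those end pieces at
 * an intermediate page size rather than demoting them
 * all the way to PAGESIZE.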
6627 */ 6628 static int 6629 segvn_demote_range( 6630 struct seg *seg, 6631 caddr_t addr, 6632 size_t len, 6633 int flag, 6634 uint_t szcvec) 6635 { 6636 caddr_t eaddr = addr + len; 6637 caddr_t lpgaddr, lpgeaddr; 6638 struct seg *nseg; 6639 struct seg *badseg1 = NULL; 6640 struct seg *badseg2 = NULL; 6641 size_t pgsz; 6642 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6643 int err; 6644 uint_t szc = seg->s_szc; 6645 uint_t tszcvec; 6646 6647 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 6648 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6649 ASSERT(szc != 0); 6650 pgsz = page_get_pagesize(szc); 6651 ASSERT(seg->s_base != addr || seg->s_size != len); 6652 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size); 6653 ASSERT(svd->softlockcnt == 0); 6654 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 6655 ASSERT(szcvec == 0 || (flag == SDR_END && svd->type == MAP_SHARED)); 6656 6657 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 6658 ASSERT(flag == SDR_RANGE || eaddr < lpgeaddr || addr > lpgaddr); 6659 if (flag == SDR_RANGE) { 6660 /* demote entire range */ 6661 badseg1 = nseg = segvn_split_seg(seg, lpgaddr); 6662 (void) segvn_split_seg(nseg, lpgeaddr); 6663 ASSERT(badseg1->s_base == lpgaddr); 6664 ASSERT(badseg1->s_size == lpgeaddr - lpgaddr); 6665 } else if (addr != lpgaddr) { 6666 ASSERT(flag == SDR_END); 6667 badseg1 = nseg = segvn_split_seg(seg, lpgaddr); 6668 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz && 6669 eaddr < lpgaddr + 2 * pgsz) { 6670 (void) segvn_split_seg(nseg, lpgeaddr); 6671 ASSERT(badseg1->s_base == lpgaddr); 6672 ASSERT(badseg1->s_size == 2 * pgsz); 6673 } else { 6674 nseg = segvn_split_seg(nseg, lpgaddr + pgsz); 6675 ASSERT(badseg1->s_base == lpgaddr); 6676 ASSERT(badseg1->s_size == pgsz); 6677 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz) { 6678 ASSERT(lpgeaddr - lpgaddr > 2 * pgsz); 6679 nseg = segvn_split_seg(nseg, lpgeaddr - pgsz); 6680 badseg2 = nseg; 6681 (void) segvn_split_seg(nseg, lpgeaddr); 6682 ASSERT(badseg2->s_base == lpgeaddr - pgsz); 6683 ASSERT(badseg2->s_size == pgsz); 6684 } 6685 } 6686 } else { 6687 ASSERT(flag == SDR_END); 6688 ASSERT(eaddr < lpgeaddr); 6689 badseg1 = nseg = segvn_split_seg(seg, lpgeaddr - pgsz); 6690 (void) segvn_split_seg(nseg, lpgeaddr); 6691 ASSERT(badseg1->s_base == lpgeaddr - pgsz); 6692 ASSERT(badseg1->s_size == pgsz); 6693 } 6694 6695 ASSERT(badseg1 != NULL); 6696 ASSERT(badseg1->s_szc == szc); 6697 ASSERT(flag == SDR_RANGE || badseg1->s_size == pgsz || 6698 badseg1->s_size == 2 * pgsz); 6699 ASSERT(sameprot(badseg1, badseg1->s_base, pgsz)); 6700 ASSERT(badseg1->s_size == pgsz || 6701 sameprot(badseg1, badseg1->s_base + pgsz, pgsz)); 6702 if (err = segvn_clrszc(badseg1)) { 6703 return (err); 6704 } 6705 ASSERT(badseg1->s_szc == 0); 6706 6707 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) { 6708 uint_t tszc = highbit(tszcvec) - 1; 6709 caddr_t ta = MAX(addr, badseg1->s_base); 6710 caddr_t te; 6711 size_t tpgsz = page_get_pagesize(tszc); 6712 6713 ASSERT(svd->type == MAP_SHARED); 6714 ASSERT(flag == SDR_END); 6715 ASSERT(tszc < szc && tszc > 0); 6716 6717 if (eaddr > badseg1->s_base + badseg1->s_size) { 6718 te = badseg1->s_base + badseg1->s_size; 6719 } else { 6720 te = eaddr; 6721 } 6722 6723 ASSERT(ta <= te); 6724 badseg1->s_szc = tszc; 6725 if (!IS_P2ALIGNED(ta, tpgsz) || !IS_P2ALIGNED(te, tpgsz)) { 6726 if (badseg2 != NULL) { 6727 err = segvn_demote_range(badseg1, ta, te - ta, 6728 SDR_END, tszcvec); 6729 if (err != 0) { 6730 return (err); 6731 } 6732 } else { 6733 
return (segvn_demote_range(badseg1, ta, 6734 te - ta, SDR_END, tszcvec)); 6735 } 6736 } 6737 } 6738 6739 if (badseg2 == NULL) 6740 return (0); 6741 ASSERT(badseg2->s_szc == szc); 6742 ASSERT(badseg2->s_size == pgsz); 6743 ASSERT(sameprot(badseg2, badseg2->s_base, badseg2->s_size)); 6744 if (err = segvn_clrszc(badseg2)) { 6745 return (err); 6746 } 6747 ASSERT(badseg2->s_szc == 0); 6748 6749 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) { 6750 uint_t tszc = highbit(tszcvec) - 1; 6751 size_t tpgsz = page_get_pagesize(tszc); 6752 6753 ASSERT(svd->type == MAP_SHARED); 6754 ASSERT(flag == SDR_END); 6755 ASSERT(tszc < szc && tszc > 0); 6756 ASSERT(badseg2->s_base > addr); 6757 ASSERT(eaddr > badseg2->s_base); 6758 ASSERT(eaddr < badseg2->s_base + badseg2->s_size); 6759 6760 badseg2->s_szc = tszc; 6761 if (!IS_P2ALIGNED(eaddr, tpgsz)) { 6762 return (segvn_demote_range(badseg2, badseg2->s_base, 6763 eaddr - badseg2->s_base, SDR_END, tszcvec)); 6764 } 6765 } 6766 6767 return (0); 6768 } 6769 6770 static int 6771 segvn_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 6772 { 6773 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6774 struct vpage *vp, *evp; 6775 6776 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6777 6778 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 6779 /* 6780 * If segment protection can be used, simply check against them. 6781 */ 6782 if (svd->pageprot == 0) { 6783 int err; 6784 6785 err = ((svd->prot & prot) != prot) ? EACCES : 0; 6786 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6787 return (err); 6788 } 6789 6790 /* 6791 * Have to check down to the vpage level. 6792 */ 6793 evp = &svd->vpage[seg_page(seg, addr + len)]; 6794 for (vp = &svd->vpage[seg_page(seg, addr)]; vp < evp; vp++) { 6795 if ((VPP_PROT(vp) & prot) != prot) { 6796 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6797 return (EACCES); 6798 } 6799 } 6800 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6801 return (0); 6802 } 6803 6804 static int 6805 segvn_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv) 6806 { 6807 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6808 size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1; 6809 6810 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6811 6812 if (pgno != 0) { 6813 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 6814 if (svd->pageprot == 0) { 6815 do { 6816 protv[--pgno] = svd->prot; 6817 } while (pgno != 0); 6818 } else { 6819 size_t pgoff = seg_page(seg, addr); 6820 6821 do { 6822 pgno--; 6823 protv[pgno] = VPP_PROT(&svd->vpage[pgno+pgoff]); 6824 } while (pgno != 0); 6825 } 6826 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6827 } 6828 return (0); 6829 } 6830 6831 static u_offset_t 6832 segvn_getoffset(struct seg *seg, caddr_t addr) 6833 { 6834 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6835 6836 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6837 6838 return (svd->offset + (uintptr_t)(addr - seg->s_base)); 6839 } 6840 6841 /*ARGSUSED*/ 6842 static int 6843 segvn_gettype(struct seg *seg, caddr_t addr) 6844 { 6845 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6846 6847 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6848 6849 return (svd->type | (svd->flags & (MAP_NORESERVE | MAP_TEXT | 6850 MAP_INITDATA))); 6851 } 6852 6853 /*ARGSUSED*/ 6854 static int 6855 segvn_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp) 6856 { 6857 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6858 6859 
ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6860 6861 *vpp = svd->vp; 6862 return (0); 6863 } 6864 6865 /* 6866 * Check to see if it makes sense to do kluster/read ahead to 6867 * addr + delta relative to the mapping at addr. We assume here 6868 * that delta is a signed PAGESIZE'd multiple (which can be negative). 6869 * 6870 * For segvn, we currently "approve" of the action if we are 6871 * still in the segment and it maps from the same vp/off, 6872 * or if the advice stored in segvn_data or vpages allows it. 6873 * Currently, klustering is not allowed only if MADV_RANDOM is set. 6874 */ 6875 static int 6876 segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta) 6877 { 6878 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6879 struct anon *oap, *ap; 6880 ssize_t pd; 6881 size_t page; 6882 struct vnode *vp1, *vp2; 6883 u_offset_t off1, off2; 6884 struct anon_map *amp; 6885 6886 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6887 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) || 6888 SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 6889 6890 if (addr + delta < seg->s_base || 6891 addr + delta >= (seg->s_base + seg->s_size)) 6892 return (-1); /* exceeded segment bounds */ 6893 6894 pd = delta / (ssize_t)PAGESIZE; /* divide to preserve sign bit */ 6895 page = seg_page(seg, addr); 6896 6897 /* 6898 * Check to see if either of the pages addr or addr + delta 6899 * have advice set that prevents klustering (if MADV_RANDOM advice 6900 * is set for entire segment, or MADV_SEQUENTIAL is set and delta 6901 * is negative). 6902 */ 6903 if (svd->advice == MADV_RANDOM || 6904 svd->advice == MADV_SEQUENTIAL && delta < 0) 6905 return (-1); 6906 else if (svd->pageadvice && svd->vpage) { 6907 struct vpage *bvpp, *evpp; 6908 6909 bvpp = &svd->vpage[page]; 6910 evpp = &svd->vpage[page + pd]; 6911 if (VPP_ADVICE(bvpp) == MADV_RANDOM || 6912 VPP_ADVICE(evpp) == MADV_SEQUENTIAL && delta < 0) 6913 return (-1); 6914 if (VPP_ADVICE(bvpp) != VPP_ADVICE(evpp) && 6915 VPP_ADVICE(evpp) == MADV_RANDOM) 6916 return (-1); 6917 } 6918 6919 if (svd->type == MAP_SHARED) 6920 return (0); /* shared mapping - all ok */ 6921 6922 if ((amp = svd->amp) == NULL) 6923 return (0); /* off original vnode */ 6924 6925 page += svd->anon_index; 6926 6927 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 6928 6929 oap = anon_get_ptr(amp->ahp, page); 6930 ap = anon_get_ptr(amp->ahp, page + pd); 6931 6932 ANON_LOCK_EXIT(&->a_rwlock); 6933 6934 if ((oap == NULL && ap != NULL) || (oap != NULL && ap == NULL)) { 6935 return (-1); /* one with and one without an anon */ 6936 } 6937 6938 if (oap == NULL) { /* implies that ap == NULL */ 6939 return (0); /* off original vnode */ 6940 } 6941 6942 /* 6943 * Now we know we have two anon pointers - check to 6944 * see if they happen to be properly allocated. 6945 */ 6946 6947 /* 6948 * XXX We cheat here and don't lock the anon slots. We can't because 6949 * we may have been called from the anon layer which might already 6950 * have locked them. We are holding a refcnt on the slots so they 6951 * can't disappear. The worst that will happen is we'll get the wrong 6952 * names (vp, off) for the slots and make a poor klustering decision. 6953 */ 6954 swap_xlate(ap, &vp1, &off1); 6955 swap_xlate(oap, &vp2, &off2); 6956 6957 6958 if (!VOP_CMP(vp1, vp2, NULL) || off1 - off2 != delta) 6959 return (-1); 6960 return (0); 6961 } 6962 6963 /* 6964 * Swap the pages of seg out to secondary storage, returning the 6965 * number of bytes of storage freed. 
6966 * 6967 * The basic idea is first to unload all translations and then to call 6968 * VOP_PUTPAGE() for all newly-unmapped pages, to push them out to the 6969 * swap device. Pages to which other segments have mappings will remain 6970 * mapped and won't be swapped. Our caller (as_swapout) has already 6971 * performed the unloading step. 6972 * 6973 * The value returned is intended to correlate well with the process's 6974 * memory requirements. However, there are some caveats: 6975 * 1) When given a shared segment as argument, this routine will 6976 * only succeed in swapping out pages for the last sharer of the 6977 * segment. (Previous callers will only have decremented mapping 6978 * reference counts.) 6979 * 2) We assume that the hat layer maintains a large enough translation 6980 * cache to capture process reference patterns. 6981 */ 6982 static size_t 6983 segvn_swapout(struct seg *seg) 6984 { 6985 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6986 struct anon_map *amp; 6987 pgcnt_t pgcnt = 0; 6988 pgcnt_t npages; 6989 pgcnt_t page; 6990 ulong_t anon_index; 6991 6992 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6993 6994 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 6995 /* 6996 * Find pages unmapped by our caller and force them 6997 * out to the virtual swap device. 6998 */ 6999 if ((amp = svd->amp) != NULL) 7000 anon_index = svd->anon_index; 7001 npages = seg->s_size >> PAGESHIFT; 7002 for (page = 0; page < npages; page++) { 7003 page_t *pp; 7004 struct anon *ap; 7005 struct vnode *vp; 7006 u_offset_t off; 7007 anon_sync_obj_t cookie; 7008 7009 /* 7010 * Obtain <vp, off> pair for the page, then look it up. 7011 * 7012 * Note that this code is willing to consider regular 7013 * pages as well as anon pages. Is this appropriate here? 7014 */ 7015 ap = NULL; 7016 if (amp != NULL) { 7017 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7018 if (anon_array_try_enter(amp, anon_index + page, 7019 &cookie)) { 7020 ANON_LOCK_EXIT(&->a_rwlock); 7021 continue; 7022 } 7023 ap = anon_get_ptr(amp->ahp, anon_index + page); 7024 if (ap != NULL) { 7025 swap_xlate(ap, &vp, &off); 7026 } else { 7027 vp = svd->vp; 7028 off = svd->offset + ptob(page); 7029 } 7030 anon_array_exit(&cookie); 7031 ANON_LOCK_EXIT(&->a_rwlock); 7032 } else { 7033 vp = svd->vp; 7034 off = svd->offset + ptob(page); 7035 } 7036 if (vp == NULL) { /* untouched zfod page */ 7037 ASSERT(ap == NULL); 7038 continue; 7039 } 7040 7041 pp = page_lookup_nowait(vp, off, SE_SHARED); 7042 if (pp == NULL) 7043 continue; 7044 7045 7046 /* 7047 * Examine the page to see whether it can be tossed out, 7048 * keeping track of how many we've found. 7049 */ 7050 if (!page_tryupgrade(pp)) { 7051 /* 7052 * If the page has an i/o lock and no mappings, 7053 * it's very likely that the page is being 7054 * written out as a result of klustering. 7055 * Assume this is so and take credit for it here. 7056 */ 7057 if (!page_io_trylock(pp)) { 7058 if (!hat_page_is_mapped(pp)) 7059 pgcnt++; 7060 } else { 7061 page_io_unlock(pp); 7062 } 7063 page_unlock(pp); 7064 continue; 7065 } 7066 ASSERT(!page_iolock_assert(pp)); 7067 7068 7069 /* 7070 * Skip if page is locked or has mappings. 7071 * We don't need the page_struct_lock to look at lckcnt 7072 * and cowcnt because the page is exclusive locked. 7073 */ 7074 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0 || 7075 hat_page_is_mapped(pp)) { 7076 page_unlock(pp); 7077 continue; 7078 } 7079 7080 /* 7081 * dispose skips large pages so try to demote first. 
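 * The free path below works on PAGESIZE pages, so a
 * large page must first be demoted to its constituent
 * small pages; if that cannot be done right now the page
 * is simply skipped.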
7082 */ 7083 if (pp->p_szc != 0 && !page_try_demote_pages(pp)) { 7084 page_unlock(pp); 7085 /* 7086 * XXX should skip the remaining page_t's of this 7087 * large page. 7088 */ 7089 continue; 7090 } 7091 7092 ASSERT(pp->p_szc == 0); 7093 7094 /* 7095 * No longer mapped -- we can toss it out. How 7096 * we do so depends on whether or not it's dirty. 7097 */ 7098 if (hat_ismod(pp) && pp->p_vnode) { 7099 /* 7100 * We must clean the page before it can be 7101 * freed. Setting B_FREE will cause pvn_done 7102 * to free the page when the i/o completes. 7103 * XXX: This also causes it to be accounted 7104 * as a pageout instead of a swap: need 7105 * B_SWAPOUT bit to use instead of B_FREE. 7106 * 7107 * Hold the vnode before releasing the page lock 7108 * to prevent it from being freed and re-used by 7109 * some other thread. 7110 */ 7111 VN_HOLD(vp); 7112 page_unlock(pp); 7113 7114 /* 7115 * Queue all i/o requests for the pageout thread 7116 * to avoid saturating the pageout devices. 7117 */ 7118 if (!queue_io_request(vp, off)) 7119 VN_RELE(vp); 7120 } else { 7121 /* 7122 * The page was clean, free it. 7123 * 7124 * XXX: Can we ever encounter modified pages 7125 * with no associated vnode here? 7126 */ 7127 ASSERT(pp->p_vnode != NULL); 7128 /*LINTED: constant in conditional context*/ 7129 VN_DISPOSE(pp, B_FREE, 0, kcred); 7130 } 7131 7132 /* 7133 * Credit now even if i/o is in progress. 7134 */ 7135 pgcnt++; 7136 } 7137 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7138 7139 /* 7140 * Wakeup pageout to initiate i/o on all queued requests. 7141 */ 7142 cv_signal_pageout(); 7143 return (ptob(pgcnt)); 7144 } 7145 7146 /* 7147 * Synchronize primary storage cache with real object in virtual memory. 7148 * 7149 * XXX - Anonymous pages should not be sync'ed out at all. 7150 */ 7151 static int 7152 segvn_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags) 7153 { 7154 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7155 struct vpage *vpp; 7156 page_t *pp; 7157 u_offset_t offset; 7158 struct vnode *vp; 7159 u_offset_t off; 7160 caddr_t eaddr; 7161 int bflags; 7162 int err = 0; 7163 int segtype; 7164 int pageprot; 7165 int prot; 7166 ulong_t anon_index; 7167 struct anon_map *amp; 7168 struct anon *ap; 7169 anon_sync_obj_t cookie; 7170 7171 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7172 7173 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7174 7175 if (svd->softlockcnt > 0) { 7176 /* 7177 * If this is shared segment non 0 softlockcnt 7178 * means locked pages are still in use. 7179 */ 7180 if (svd->type == MAP_SHARED) { 7181 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7182 return (EAGAIN); 7183 } 7184 7185 /* 7186 * flush all pages from seg cache 7187 * otherwise we may deadlock in swap_putpage 7188 * for B_INVAL page (4175402). 7189 * 7190 * Even if we grab segvn WRITER's lock 7191 * here, there might be another thread which could've 7192 * successfully performed lookup/insert just before 7193 * we acquired the lock here. So, grabbing either 7194 * lock here is of not much use. Until we devise 7195 * a strategy at upper layers to solve the 7196 * synchronization issues completely, we expect 7197 * applications to handle this appropriately. 7198 */ 7199 segvn_purge(seg); 7200 if (svd->softlockcnt > 0) { 7201 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7202 return (EAGAIN); 7203 } 7204 } else if (svd->type == MAP_SHARED && svd->amp != NULL && 7205 svd->amp->a_softlockcnt > 0) { 7206 /* 7207 * Try to purge this amp's entries from pcache. 
It will 7208 * succeed only if other segments that share the amp have no 7209 * outstanding softlock's. 7210 */ 7211 segvn_purge(seg); 7212 if (svd->amp->a_softlockcnt > 0 || svd->softlockcnt > 0) { 7213 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7214 return (EAGAIN); 7215 } 7216 } 7217 7218 vpp = svd->vpage; 7219 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7220 bflags = ((flags & MS_ASYNC) ? B_ASYNC : 0) | 7221 ((flags & MS_INVALIDATE) ? B_INVAL : 0); 7222 7223 if (attr) { 7224 pageprot = attr & ~(SHARED|PRIVATE); 7225 segtype = (attr & SHARED) ? MAP_SHARED : MAP_PRIVATE; 7226 7227 /* 7228 * We are done if the segment types don't match 7229 * or if we have segment level protections and 7230 * they don't match. 7231 */ 7232 if (svd->type != segtype) { 7233 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7234 return (0); 7235 } 7236 if (vpp == NULL) { 7237 if (svd->prot != pageprot) { 7238 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7239 return (0); 7240 } 7241 prot = svd->prot; 7242 } else 7243 vpp = &svd->vpage[seg_page(seg, addr)]; 7244 7245 } else if (svd->vp && svd->amp == NULL && 7246 (flags & MS_INVALIDATE) == 0) { 7247 7248 /* 7249 * No attributes, no anonymous pages and MS_INVALIDATE flag 7250 * is not on, just use one big request. 7251 */ 7252 err = VOP_PUTPAGE(svd->vp, (offset_t)offset, len, 7253 bflags, svd->cred, NULL); 7254 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7255 return (err); 7256 } 7257 7258 if ((amp = svd->amp) != NULL) 7259 anon_index = svd->anon_index + seg_page(seg, addr); 7260 7261 for (eaddr = addr + len; addr < eaddr; addr += PAGESIZE) { 7262 ap = NULL; 7263 if (amp != NULL) { 7264 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7265 anon_array_enter(amp, anon_index, &cookie); 7266 ap = anon_get_ptr(amp->ahp, anon_index++); 7267 if (ap != NULL) { 7268 swap_xlate(ap, &vp, &off); 7269 } else { 7270 vp = svd->vp; 7271 off = offset; 7272 } 7273 anon_array_exit(&cookie); 7274 ANON_LOCK_EXIT(&->a_rwlock); 7275 } else { 7276 vp = svd->vp; 7277 off = offset; 7278 } 7279 offset += PAGESIZE; 7280 7281 if (vp == NULL) /* untouched zfod page */ 7282 continue; 7283 7284 if (attr) { 7285 if (vpp) { 7286 prot = VPP_PROT(vpp); 7287 vpp++; 7288 } 7289 if (prot != pageprot) { 7290 continue; 7291 } 7292 } 7293 7294 /* 7295 * See if any of these pages are locked -- if so, then we 7296 * will have to truncate an invalidate request at the first 7297 * locked one. We don't need the page_struct_lock to test 7298 * as this is only advisory; even if we acquire it someone 7299 * might race in and lock the page after we unlock and before 7300 * we do the PUTPAGE, then PUTPAGE simply does nothing. 7301 */ 7302 if (flags & MS_INVALIDATE) { 7303 if ((pp = page_lookup(vp, off, SE_SHARED)) != NULL) { 7304 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) { 7305 page_unlock(pp); 7306 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7307 return (EBUSY); 7308 } 7309 if (ap != NULL && pp->p_szc != 0 && 7310 page_tryupgrade(pp)) { 7311 if (pp->p_lckcnt == 0 && 7312 pp->p_cowcnt == 0) { 7313 /* 7314 * swapfs VN_DISPOSE() won't 7315 * invalidate large pages. 7316 * Attempt to demote. 7317 * XXX can't help it if it 7318 * fails. But for swapfs 7319 * pages it is no big deal. 7320 */ 7321 (void) page_try_demote_pages( 7322 pp); 7323 } 7324 } 7325 page_unlock(pp); 7326 } 7327 } else if (svd->type == MAP_SHARED && amp != NULL) { 7328 /* 7329 * Avoid writing out to disk ISM's large pages 7330 * because segspt_free_pages() relies on NULL an_pvp 7331 * of anon slots of such pages. 
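 * Pushing such a page out would give it physical swap
 * backing and set an_pvp in its anon slot, breaking that
 * assumption, so these pages are only looked up and then
 * skipped rather than written out.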
7332 */ 7333 7334 ASSERT(svd->vp == NULL); 7335 /* 7336 * swapfs uses page_lookup_nowait if not freeing or 7337 * invalidating and skips a page if 7338 * page_lookup_nowait returns NULL. 7339 */ 7340 pp = page_lookup_nowait(vp, off, SE_SHARED); 7341 if (pp == NULL) { 7342 continue; 7343 } 7344 if (pp->p_szc != 0) { 7345 page_unlock(pp); 7346 continue; 7347 } 7348 7349 /* 7350 * Note ISM pages are created large so (vp, off)'s 7351 * page cannot suddenly become large after we unlock 7352 * pp. 7353 */ 7354 page_unlock(pp); 7355 } 7356 /* 7357 * XXX - Should ultimately try to kluster 7358 * calls to VOP_PUTPAGE() for performance. 7359 */ 7360 VN_HOLD(vp); 7361 err = VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE, 7362 (bflags | (IS_SWAPFSVP(vp) ? B_PAGE_NOWAIT : 0)), 7363 svd->cred, NULL); 7364 7365 VN_RELE(vp); 7366 if (err) 7367 break; 7368 } 7369 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7370 return (err); 7371 } 7372 7373 /* 7374 * Determine if we have data corresponding to pages in the 7375 * primary storage virtual memory cache (i.e., "in core"). 7376 */ 7377 static size_t 7378 segvn_incore(struct seg *seg, caddr_t addr, size_t len, char *vec) 7379 { 7380 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7381 struct vnode *vp, *avp; 7382 u_offset_t offset, aoffset; 7383 size_t p, ep; 7384 int ret; 7385 struct vpage *vpp; 7386 page_t *pp; 7387 uint_t start; 7388 struct anon_map *amp; /* XXX - for locknest */ 7389 struct anon *ap; 7390 uint_t attr; 7391 anon_sync_obj_t cookie; 7392 7393 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7394 7395 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7396 if (svd->amp == NULL && svd->vp == NULL) { 7397 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7398 bzero(vec, btopr(len)); 7399 return (len); /* no anonymous pages created yet */ 7400 } 7401 7402 p = seg_page(seg, addr); 7403 ep = seg_page(seg, addr + len); 7404 start = svd->vp ? SEG_PAGE_VNODEBACKED : 0; 7405 7406 amp = svd->amp; 7407 for (; p < ep; p++, addr += PAGESIZE) { 7408 vpp = (svd->vpage) ? &svd->vpage[p]: NULL; 7409 ret = start; 7410 ap = NULL; 7411 avp = NULL; 7412 /* Grab the vnode/offset for the anon slot */ 7413 if (amp != NULL) { 7414 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7415 anon_array_enter(amp, svd->anon_index + p, &cookie); 7416 ap = anon_get_ptr(amp->ahp, svd->anon_index + p); 7417 if (ap != NULL) { 7418 swap_xlate(ap, &avp, &aoffset); 7419 } 7420 anon_array_exit(&cookie); 7421 ANON_LOCK_EXIT(&->a_rwlock); 7422 } 7423 if ((avp != NULL) && page_exists(avp, aoffset)) { 7424 /* A page exists for the anon slot */ 7425 ret |= SEG_PAGE_INCORE; 7426 7427 /* 7428 * If page is mapped and writable 7429 */ 7430 attr = (uint_t)0; 7431 if ((hat_getattr(seg->s_as->a_hat, addr, 7432 &attr) != -1) && (attr & PROT_WRITE)) { 7433 ret |= SEG_PAGE_ANON; 7434 } 7435 /* 7436 * Don't get page_struct lock for lckcnt and cowcnt, 7437 * since this is purely advisory. 7438 */ 7439 if ((pp = page_lookup_nowait(avp, aoffset, 7440 SE_SHARED)) != NULL) { 7441 if (pp->p_lckcnt) 7442 ret |= SEG_PAGE_SOFTLOCK; 7443 if (pp->p_cowcnt) 7444 ret |= SEG_PAGE_HASCOW; 7445 page_unlock(pp); 7446 } 7447 } 7448 7449 /* Gather vnode statistics */ 7450 vp = svd->vp; 7451 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7452 7453 if (vp != NULL) { 7454 /* 7455 * Try to obtain a "shared" lock on the page 7456 * without blocking. If this fails, determine 7457 * if the page is in memory. 
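 * page_lookup_nowait() can fail merely because the page
 * is exclusively locked at the moment, so a failed
 * lookup does not by itself mean the page is absent;
 * page_exists() is used below to tell the two cases
 * apart.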
7458 */ 7459 pp = page_lookup_nowait(vp, offset, SE_SHARED); 7460 if ((pp == NULL) && (page_exists(vp, offset))) { 7461 /* Page is incore, and is named */ 7462 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE); 7463 } 7464 /* 7465 * Don't get page_struct lock for lckcnt and cowcnt, 7466 * since this is purely advisory. 7467 */ 7468 if (pp != NULL) { 7469 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE); 7470 if (pp->p_lckcnt) 7471 ret |= SEG_PAGE_SOFTLOCK; 7472 if (pp->p_cowcnt) 7473 ret |= SEG_PAGE_HASCOW; 7474 page_unlock(pp); 7475 } 7476 } 7477 7478 /* Gather virtual page information */ 7479 if (vpp) { 7480 if (VPP_ISPPLOCK(vpp)) 7481 ret |= SEG_PAGE_LOCKED; 7482 vpp++; 7483 } 7484 7485 *vec++ = (char)ret; 7486 } 7487 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7488 return (len); 7489 } 7490 7491 /* 7492 * Statement for p_cowcnts/p_lckcnts. 7493 * 7494 * p_cowcnt is updated while mlock/munlocking MAP_PRIVATE and PROT_WRITE region 7495 * irrespective of the following factors or anything else: 7496 * 7497 * (1) anon slots are populated or not 7498 * (2) cow is broken or not 7499 * (3) refcnt on ap is 1 or greater than 1 7500 * 7501 * If it's not MAP_PRIVATE and PROT_WRITE, p_lckcnt is updated during mlock 7502 * and munlock. 7503 * 7504 * 7505 * Handling p_cowcnts/p_lckcnts during copy-on-write fault: 7506 * 7507 * if vpage has PROT_WRITE 7508 * transfer cowcnt on the oldpage -> cowcnt on the newpage 7509 * else 7510 * transfer lckcnt on the oldpage -> lckcnt on the newpage 7511 * 7512 * During copy-on-write, decrement p_cowcnt on the oldpage and increment 7513 * p_cowcnt on the newpage *if* the corresponding vpage has PROT_WRITE. 7514 * 7515 * We may also break COW if softlocking on read access in the physio case. 7516 * In this case, vpage may not have PROT_WRITE. So, we need to decrement 7517 * p_lckcnt on the oldpage and increment p_lckcnt on the newpage *if* the 7518 * vpage doesn't have PROT_WRITE. 7519 * 7520 * 7521 * Handling p_cowcnts/p_lckcnts during mprotect on mlocked region: 7522 * 7523 * If a MAP_PRIVATE region loses PROT_WRITE, we decrement p_cowcnt and 7524 * increment p_lckcnt by calling page_subclaim() which takes care of 7525 * availrmem accounting and p_lckcnt overflow. 7526 * 7527 * If a MAP_PRIVATE region gains PROT_WRITE, we decrement p_lckcnt and 7528 * increment p_cowcnt by calling page_addclaim() which takes care of 7529 * availrmem availability and p_cowcnt overflow. 7530 */ 7531 7532 /* 7533 * Lock down (or unlock) pages mapped by this segment. 7534 * 7535 * XXX only creates PAGESIZE pages if anon slots are not initialized. 7536 * At fault time they will be relocated into larger pages. 
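 * The overall flow: for MC_LOCK, make sure a vpage array
 * and (for anonymous mappings) anon slots exist, bring
 * each page in with VOP_GETPAGE() if needed, lock it
 * with page_pp_lock() and mark its vpage; for MC_UNLOCK,
 * look the page up and release it with page_pp_unlock().
 * The bytes actually locked or unlocked are charged to
 * or credited against the locked-memory resource
 * controls.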
7537 */ 7538 static int 7539 segvn_lockop(struct seg *seg, caddr_t addr, size_t len, 7540 int attr, int op, ulong_t *lockmap, size_t pos) 7541 { 7542 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7543 struct vpage *vpp; 7544 struct vpage *evp; 7545 page_t *pp; 7546 u_offset_t offset; 7547 u_offset_t off; 7548 int segtype; 7549 int pageprot; 7550 int claim; 7551 struct vnode *vp; 7552 ulong_t anon_index; 7553 struct anon_map *amp; 7554 struct anon *ap; 7555 struct vattr va; 7556 anon_sync_obj_t cookie; 7557 struct kshmid *sp = NULL; 7558 struct proc *p = curproc; 7559 kproject_t *proj = NULL; 7560 int chargeproc = 1; 7561 size_t locked_bytes = 0; 7562 size_t unlocked_bytes = 0; 7563 int err = 0; 7564 7565 /* 7566 * Hold write lock on address space because may split or concatenate 7567 * segments 7568 */ 7569 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7570 7571 /* 7572 * If this is a shm, use shm's project and zone, else use 7573 * project and zone of calling process 7574 */ 7575 7576 /* Determine if this segment backs a sysV shm */ 7577 if (svd->amp != NULL && svd->amp->a_sp != NULL) { 7578 ASSERT(svd->type == MAP_SHARED); 7579 ASSERT(svd->tr_state == SEGVN_TR_OFF); 7580 sp = svd->amp->a_sp; 7581 proj = sp->shm_perm.ipc_proj; 7582 chargeproc = 0; 7583 } 7584 7585 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 7586 if (attr) { 7587 pageprot = attr & ~(SHARED|PRIVATE); 7588 segtype = attr & SHARED ? MAP_SHARED : MAP_PRIVATE; 7589 7590 /* 7591 * We are done if the segment types don't match 7592 * or if we have segment level protections and 7593 * they don't match. 7594 */ 7595 if (svd->type != segtype) { 7596 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7597 return (0); 7598 } 7599 if (svd->pageprot == 0 && svd->prot != pageprot) { 7600 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7601 return (0); 7602 } 7603 } 7604 7605 if (op == MC_LOCK) { 7606 if (svd->tr_state == SEGVN_TR_INIT) { 7607 svd->tr_state = SEGVN_TR_OFF; 7608 } else if (svd->tr_state == SEGVN_TR_ON) { 7609 ASSERT(svd->amp != NULL); 7610 segvn_textunrepl(seg, 0); 7611 ASSERT(svd->amp == NULL && 7612 svd->tr_state == SEGVN_TR_OFF); 7613 } 7614 } 7615 7616 /* 7617 * If we're locking, then we must create a vpage structure if 7618 * none exists. If we're unlocking, then check to see if there 7619 * is a vpage -- if not, then we could not have locked anything. 7620 */ 7621 7622 if ((vpp = svd->vpage) == NULL) { 7623 if (op == MC_LOCK) 7624 segvn_vpage(seg); 7625 else { 7626 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7627 return (0); 7628 } 7629 } 7630 7631 /* 7632 * The anonymous data vector (i.e., previously 7633 * unreferenced mapping to swap space) can be allocated 7634 * by lazily testing for its existence. 
7635 */ 7636 if (op == MC_LOCK && svd->amp == NULL && svd->vp == NULL) { 7637 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 7638 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 7639 svd->amp->a_szc = seg->s_szc; 7640 } 7641 7642 if ((amp = svd->amp) != NULL) { 7643 anon_index = svd->anon_index + seg_page(seg, addr); 7644 } 7645 7646 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7647 evp = &svd->vpage[seg_page(seg, addr + len)]; 7648 7649 if (sp != NULL) 7650 mutex_enter(&sp->shm_mlock); 7651 7652 /* determine number of unlocked bytes in range for lock operation */ 7653 if (op == MC_LOCK) { 7654 7655 if (sp == NULL) { 7656 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp; 7657 vpp++) { 7658 if (!VPP_ISPPLOCK(vpp)) 7659 unlocked_bytes += PAGESIZE; 7660 } 7661 } else { 7662 ulong_t i_idx, i_edx; 7663 anon_sync_obj_t i_cookie; 7664 struct anon *i_ap; 7665 struct vnode *i_vp; 7666 u_offset_t i_off; 7667 7668 /* Only count sysV pages once for locked memory */ 7669 i_edx = svd->anon_index + seg_page(seg, addr + len); 7670 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7671 for (i_idx = anon_index; i_idx < i_edx; i_idx++) { 7672 anon_array_enter(amp, i_idx, &i_cookie); 7673 i_ap = anon_get_ptr(amp->ahp, i_idx); 7674 if (i_ap == NULL) { 7675 unlocked_bytes += PAGESIZE; 7676 anon_array_exit(&i_cookie); 7677 continue; 7678 } 7679 swap_xlate(i_ap, &i_vp, &i_off); 7680 anon_array_exit(&i_cookie); 7681 pp = page_lookup(i_vp, i_off, SE_SHARED); 7682 if (pp == NULL) { 7683 unlocked_bytes += PAGESIZE; 7684 continue; 7685 } else if (pp->p_lckcnt == 0) 7686 unlocked_bytes += PAGESIZE; 7687 page_unlock(pp); 7688 } 7689 ANON_LOCK_EXIT(&->a_rwlock); 7690 } 7691 7692 mutex_enter(&p->p_lock); 7693 err = rctl_incr_locked_mem(p, proj, unlocked_bytes, 7694 chargeproc); 7695 mutex_exit(&p->p_lock); 7696 7697 if (err) { 7698 if (sp != NULL) 7699 mutex_exit(&sp->shm_mlock); 7700 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7701 return (err); 7702 } 7703 } 7704 /* 7705 * Loop over all pages in the range. Process if we're locking and 7706 * page has not already been locked in this mapping; or if we're 7707 * unlocking and the page has been locked. 7708 */ 7709 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp; 7710 vpp++, pos++, addr += PAGESIZE, offset += PAGESIZE, anon_index++) { 7711 if ((attr == 0 || VPP_PROT(vpp) == pageprot) && 7712 ((op == MC_LOCK && !VPP_ISPPLOCK(vpp)) || 7713 (op == MC_UNLOCK && VPP_ISPPLOCK(vpp)))) { 7714 7715 if (amp != NULL) 7716 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7717 /* 7718 * If this isn't a MAP_NORESERVE segment and 7719 * we're locking, allocate anon slots if they 7720 * don't exist. The page is brought in later on. 7721 */ 7722 if (op == MC_LOCK && svd->vp == NULL && 7723 ((svd->flags & MAP_NORESERVE) == 0) && 7724 amp != NULL && 7725 ((ap = anon_get_ptr(amp->ahp, anon_index)) 7726 == NULL)) { 7727 anon_array_enter(amp, anon_index, &cookie); 7728 7729 if ((ap = anon_get_ptr(amp->ahp, 7730 anon_index)) == NULL) { 7731 pp = anon_zero(seg, addr, &ap, 7732 svd->cred); 7733 if (pp == NULL) { 7734 anon_array_exit(&cookie); 7735 ANON_LOCK_EXIT(&->a_rwlock); 7736 err = ENOMEM; 7737 goto out; 7738 } 7739 ASSERT(anon_get_ptr(amp->ahp, 7740 anon_index) == NULL); 7741 (void) anon_set_ptr(amp->ahp, 7742 anon_index, ap, ANON_SLEEP); 7743 page_unlock(pp); 7744 } 7745 anon_array_exit(&cookie); 7746 } 7747 7748 /* 7749 * Get name for page, accounting for 7750 * existence of private copy. 
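 * The "name" is the <vnode, offset> pair identifying the
 * page: a private (anon) copy is named by swap_xlate()
 * on its anon slot, otherwise the segment's backing
 * vnode and file offset are used.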
7751 */ 7752 ap = NULL; 7753 if (amp != NULL) { 7754 anon_array_enter(amp, anon_index, &cookie); 7755 ap = anon_get_ptr(amp->ahp, anon_index); 7756 if (ap != NULL) { 7757 swap_xlate(ap, &vp, &off); 7758 } else { 7759 if (svd->vp == NULL && 7760 (svd->flags & MAP_NORESERVE)) { 7761 anon_array_exit(&cookie); 7762 ANON_LOCK_EXIT(&->a_rwlock); 7763 continue; 7764 } 7765 vp = svd->vp; 7766 off = offset; 7767 } 7768 if (op != MC_LOCK || ap == NULL) { 7769 anon_array_exit(&cookie); 7770 ANON_LOCK_EXIT(&->a_rwlock); 7771 } 7772 } else { 7773 vp = svd->vp; 7774 off = offset; 7775 } 7776 7777 /* 7778 * Get page frame. It's ok if the page is 7779 * not available when we're unlocking, as this 7780 * may simply mean that a page we locked got 7781 * truncated out of existence after we locked it. 7782 * 7783 * Invoke VOP_GETPAGE() to obtain the page struct 7784 * since we may need to read it from disk if its 7785 * been paged out. 7786 */ 7787 if (op != MC_LOCK) 7788 pp = page_lookup(vp, off, SE_SHARED); 7789 else { 7790 page_t *pl[1 + 1]; 7791 int error; 7792 7793 ASSERT(vp != NULL); 7794 7795 error = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, 7796 (uint_t *)NULL, pl, PAGESIZE, seg, addr, 7797 S_OTHER, svd->cred, NULL); 7798 7799 if (error && ap != NULL) { 7800 anon_array_exit(&cookie); 7801 ANON_LOCK_EXIT(&->a_rwlock); 7802 } 7803 7804 /* 7805 * If the error is EDEADLK then we must bounce 7806 * up and drop all vm subsystem locks and then 7807 * retry the operation later 7808 * This behavior is a temporary measure because 7809 * ufs/sds logging is badly designed and will 7810 * deadlock if we don't allow this bounce to 7811 * happen. The real solution is to re-design 7812 * the logging code to work properly. See bug 7813 * 4125102 for details of the problem. 7814 */ 7815 if (error == EDEADLK) { 7816 err = error; 7817 goto out; 7818 } 7819 /* 7820 * Quit if we fail to fault in the page. Treat 7821 * the failure as an error, unless the addr 7822 * is mapped beyond the end of a file. 7823 */ 7824 if (error && svd->vp) { 7825 va.va_mask = AT_SIZE; 7826 if (VOP_GETATTR(svd->vp, &va, 0, 7827 svd->cred, NULL) != 0) { 7828 err = EIO; 7829 goto out; 7830 } 7831 if (btopr(va.va_size) >= 7832 btopr(off + 1)) { 7833 err = EIO; 7834 goto out; 7835 } 7836 goto out; 7837 7838 } else if (error) { 7839 err = EIO; 7840 goto out; 7841 } 7842 pp = pl[0]; 7843 ASSERT(pp != NULL); 7844 } 7845 7846 /* 7847 * See Statement at the beginning of this routine. 7848 * 7849 * claim is always set if MAP_PRIVATE and PROT_WRITE 7850 * irrespective of following factors: 7851 * 7852 * (1) anon slots are populated or not 7853 * (2) cow is broken or not 7854 * (3) refcnt on ap is 1 or greater than 1 7855 * 7856 * See 4140683 for details 7857 */ 7858 claim = ((VPP_PROT(vpp) & PROT_WRITE) && 7859 (svd->type == MAP_PRIVATE)); 7860 7861 /* 7862 * Perform page-level operation appropriate to 7863 * operation. If locking, undo the SOFTLOCK 7864 * performed to bring the page into memory 7865 * after setting the lock. If unlocking, 7866 * and no page was found, account for the claim 7867 * separately. 
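 * locked_bytes/unlocked_bytes track how much memory
 * actually changes lock state; at the "out:" label the
 * difference is reconciled with the locked-memory rctl
 * charged up front, e.g. crediting back sysV pages that
 * another mapping had already locked.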
7868 */ 7869 if (op == MC_LOCK) { 7870 int ret = 1; /* Assume success */ 7871 7872 ASSERT(!VPP_ISPPLOCK(vpp)); 7873 7874 ret = page_pp_lock(pp, claim, 0); 7875 if (ap != NULL) { 7876 if (ap->an_pvp != NULL) { 7877 anon_swap_free(ap, pp); 7878 } 7879 anon_array_exit(&cookie); 7880 ANON_LOCK_EXIT(&amp->a_rwlock); 7881 } 7882 if (ret == 0) { 7883 /* locking page failed */ 7884 page_unlock(pp); 7885 err = EAGAIN; 7886 goto out; 7887 } 7888 VPP_SETPPLOCK(vpp); 7889 if (sp != NULL) { 7890 if (pp->p_lckcnt == 1) 7891 locked_bytes += PAGESIZE; 7892 } else 7893 locked_bytes += PAGESIZE; 7894 7895 if (lockmap != (ulong_t *)NULL) 7896 BT_SET(lockmap, pos); 7897 7898 page_unlock(pp); 7899 } else { 7900 ASSERT(VPP_ISPPLOCK(vpp)); 7901 if (pp != NULL) { 7902 /* sysV pages should be locked */ 7903 ASSERT(sp == NULL || pp->p_lckcnt > 0); 7904 page_pp_unlock(pp, claim, 0); 7905 if (sp != NULL) { 7906 if (pp->p_lckcnt == 0) 7907 unlocked_bytes 7908 += PAGESIZE; 7909 } else 7910 unlocked_bytes += PAGESIZE; 7911 page_unlock(pp); 7912 } else { 7913 ASSERT(sp == NULL); 7914 unlocked_bytes += PAGESIZE; 7915 } 7916 VPP_CLRPPLOCK(vpp); 7917 } 7918 } 7919 } 7920 out: 7921 if (op == MC_LOCK) { 7922 /* Credit back bytes that did not get locked */ 7923 if ((unlocked_bytes - locked_bytes) > 0) { 7924 if (proj == NULL) 7925 mutex_enter(&p->p_lock); 7926 rctl_decr_locked_mem(p, proj, 7927 (unlocked_bytes - locked_bytes), chargeproc); 7928 if (proj == NULL) 7929 mutex_exit(&p->p_lock); 7930 } 7931 7932 } else { 7933 /* Account bytes that were unlocked */ 7934 if (unlocked_bytes > 0) { 7935 if (proj == NULL) 7936 mutex_enter(&p->p_lock); 7937 rctl_decr_locked_mem(p, proj, unlocked_bytes, 7938 chargeproc); 7939 if (proj == NULL) 7940 mutex_exit(&p->p_lock); 7941 } 7942 } 7943 if (sp != NULL) 7944 mutex_exit(&sp->shm_mlock); 7945 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7946 7947 return (err); 7948 } 7949 7950 /* 7951 * Set advice from user for specified pages 7952 * There are 9 types of advice: 7953 * MADV_NORMAL - Normal (default) behavior (whatever that is) 7954 * MADV_RANDOM - Random page references 7955 * do not allow readahead or 'klustering' 7956 * MADV_SEQUENTIAL - Sequential page references 7957 * Pages previous to the one currently being 7958 * accessed (determined by fault) are 'not needed' 7959 * and are freed immediately 7960 * MADV_WILLNEED - Pages are likely to be used (fault ahead in mctl) 7961 * MADV_DONTNEED - Pages are not needed (synced out in mctl) 7962 * MADV_FREE - Contents can be discarded 7963 * MADV_ACCESS_DEFAULT- Default access 7964 * MADV_ACCESS_LWP - Next LWP will access heavily 7965 * MADV_ACCESS_MANY- Many LWPs or processes will access heavily 7966 */ 7967 static int 7968 segvn_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav) 7969 { 7970 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7971 size_t page; 7972 int err = 0; 7973 int already_set; 7974 struct anon_map *amp; 7975 ulong_t anon_index; 7976 struct seg *next; 7977 lgrp_mem_policy_t policy; 7978 struct seg *prev; 7979 struct vnode *vp; 7980 7981 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7982 7983 /* 7984 * In case of MADV_FREE, we won't be modifying any segment private 7985 * data structures; so, we only need to grab READER's lock 7986 */ 7987 if (behav != MADV_FREE) { 7988 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 7989 if (svd->tr_state != SEGVN_TR_OFF) { 7990 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7991 return (0); 7992 } 7993 } else { 7994 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock,
RW_READER); 7995 } 7996 7997 /* 7998 * Large pages are assumed to be only turned on when accesses to the 7999 * segment's address range have spatial and temporal locality. That 8000 * justifies ignoring MADV_SEQUENTIAL for large page segments. 8001 * Also, ignore advice affecting lgroup memory allocation 8002 * if we don't need to do lgroup optimizations on this system 8003 */ 8004 8005 if ((behav == MADV_SEQUENTIAL && 8006 (seg->s_szc != 0 || HAT_IS_REGION_COOKIE_VALID(svd->rcookie))) || 8007 (!lgrp_optimizations() && (behav == MADV_ACCESS_DEFAULT || 8008 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY))) { 8009 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8010 return (0); 8011 } 8012 8013 if (behav == MADV_SEQUENTIAL || behav == MADV_ACCESS_DEFAULT || 8014 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY) { 8015 /* 8016 * Since we are going to unload hat mappings 8017 * we first have to flush the cache. Otherwise 8018 * this might lead to system panic if another 8019 * thread is doing physio on the range whose 8020 * mappings are unloaded by madvise(3C). 8021 */ 8022 if (svd->softlockcnt > 0) { 8023 /* 8024 * If this is a shared segment, non 0 softlockcnt 8025 * means locked pages are still in use. 8026 */ 8027 if (svd->type == MAP_SHARED) { 8028 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8029 return (EAGAIN); 8030 } 8031 /* 8032 * Since we do have the segvn writers lock 8033 * nobody can fill the cache with entries 8034 * belonging to this seg during the purge. 8035 * The flush either succeeds or we still 8036 * have pending I/Os. In the latter case, 8037 * madvise(3C) fails. 8038 */ 8039 segvn_purge(seg); 8040 if (svd->softlockcnt > 0) { 8041 /* 8042 * Since madvise(3C) is advisory and 8043 * it's not part of UNIX98, madvise(3C) 8044 * failure here doesn't cause any hardship. 8045 * Note that we don't block in "as" layer. 8046 */ 8047 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8048 return (EAGAIN); 8049 } 8050 } else if (svd->type == MAP_SHARED && svd->amp != NULL && 8051 svd->amp->a_softlockcnt > 0) { 8052 /* 8053 * Try to purge this amp's entries from pcache. It 8054 * will succeed only if other segments that share the 8055 * amp have no outstanding softlocks. 8056 */ 8057 segvn_purge(seg); 8058 } 8059 } 8060 8061 amp = svd->amp; 8062 vp = svd->vp; 8063 if (behav == MADV_FREE) { 8064 /* 8065 * MADV_FREE is not supported for segments with 8066 * underlying object; if anonmap is NULL, anon slots 8067 * are not yet populated and there is nothing for 8068 * us to do. As MADV_FREE is advisory, we don't 8069 * return error in either case. 8070 */ 8071 if (vp != NULL || amp == NULL) { 8072 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8073 return (0); 8074 } 8075 8076 segvn_purge(seg); 8077 8078 page = seg_page(seg, addr); 8079 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 8080 anon_disclaim(amp, svd->anon_index + page, len); 8081 ANON_LOCK_EXIT(&amp->a_rwlock); 8082 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8083 return (0); 8084 } 8085 8086 /* 8087 * If advice is to be applied to entire segment, 8088 * use advice field in seg_data structure 8089 * otherwise use appropriate vpage entry.
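 *
 * As a rough illustration (not taken from this file; the mapping size and
 * advice values are arbitrary), both cases are normally reached from user
 * level via madvise(3C) on a mapping backed by this segment driver:
 *
 *	#include <sys/types.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	size_t pg = (size_t)sysconf(_SC_PAGESIZE);
 *	caddr_t base = mmap(NULL, 8 * pg, PROT_READ | PROT_WRITE,
 *	    MAP_PRIVATE | MAP_ANON, -1, 0);
 *
 *	advice for the whole mapping is recorded in svd->advice:
 *	(void) madvise(base, 8 * pg, MADV_RANDOM);
 *
 *	advice for a sub-range takes the per-page vpage path below:
 *	(void) madvise(base + pg, 2 * pg, MADV_SEQUENTIAL);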
8090 */ 8091 if ((addr == seg->s_base) && (len == seg->s_size)) { 8092 switch (behav) { 8093 case MADV_ACCESS_LWP: 8094 case MADV_ACCESS_MANY: 8095 case MADV_ACCESS_DEFAULT: 8096 /* 8097 * Set memory allocation policy for this segment 8098 */ 8099 policy = lgrp_madv_to_policy(behav, len, svd->type); 8100 if (svd->type == MAP_SHARED) 8101 already_set = lgrp_shm_policy_set(policy, amp, 8102 svd->anon_index, vp, svd->offset, len); 8103 else { 8104 /* 8105 * For private memory, need writers lock on 8106 * address space because the segment may be 8107 * split or concatenated when changing policy 8108 */ 8109 if (AS_READ_HELD(seg->s_as, 8110 &seg->s_as->a_lock)) { 8111 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8112 return (IE_RETRY); 8113 } 8114 8115 already_set = lgrp_privm_policy_set(policy, 8116 &svd->policy_info, len); 8117 } 8118 8119 /* 8120 * If policy set already and it shouldn't be reapplied, 8121 * don't do anything. 8122 */ 8123 if (already_set && 8124 !LGRP_MEM_POLICY_REAPPLICABLE(policy)) 8125 break; 8126 8127 /* 8128 * Mark any existing pages in given range for 8129 * migration 8130 */ 8131 page_mark_migrate(seg, addr, len, amp, svd->anon_index, 8132 vp, svd->offset, 1); 8133 8134 /* 8135 * If same policy set already or this is a shared 8136 * memory segment, don't need to try to concatenate 8137 * segment with adjacent ones. 8138 */ 8139 if (already_set || svd->type == MAP_SHARED) 8140 break; 8141 8142 /* 8143 * Try to concatenate this segment with previous 8144 * one and next one, since we changed policy for 8145 * this one and it may be compatible with adjacent 8146 * ones now. 8147 */ 8148 prev = AS_SEGPREV(seg->s_as, seg); 8149 next = AS_SEGNEXT(seg->s_as, seg); 8150 8151 if (next && next->s_ops == &segvn_ops && 8152 addr + len == next->s_base) 8153 (void) segvn_concat(seg, next, 1); 8154 8155 if (prev && prev->s_ops == &segvn_ops && 8156 addr == prev->s_base + prev->s_size) { 8157 /* 8158 * Drop lock for private data of current 8159 * segment before concatenating (deleting) it 8160 * and return IE_REATTACH to tell as_ctl() that 8161 * current segment has changed 8162 */ 8163 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8164 if (!segvn_concat(prev, seg, 1)) 8165 err = IE_REATTACH; 8166 8167 return (err); 8168 } 8169 break; 8170 8171 case MADV_SEQUENTIAL: 8172 /* 8173 * unloading mapping guarantees 8174 * detection in segvn_fault 8175 */ 8176 ASSERT(seg->s_szc == 0); 8177 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 8178 hat_unload(seg->s_as->a_hat, addr, len, 8179 HAT_UNLOAD); 8180 /* FALLTHROUGH */ 8181 case MADV_NORMAL: 8182 case MADV_RANDOM: 8183 svd->advice = (uchar_t)behav; 8184 svd->pageadvice = 0; 8185 break; 8186 case MADV_WILLNEED: /* handled in memcntl */ 8187 case MADV_DONTNEED: /* handled in memcntl */ 8188 case MADV_FREE: /* handled above */ 8189 break; 8190 default: 8191 err = EINVAL; 8192 } 8193 } else { 8194 caddr_t eaddr; 8195 struct seg *new_seg; 8196 struct segvn_data *new_svd; 8197 u_offset_t off; 8198 caddr_t oldeaddr; 8199 8200 page = seg_page(seg, addr); 8201 8202 segvn_vpage(seg); 8203 8204 switch (behav) { 8205 struct vpage *bvpp, *evpp; 8206 8207 case MADV_ACCESS_LWP: 8208 case MADV_ACCESS_MANY: 8209 case MADV_ACCESS_DEFAULT: 8210 /* 8211 * Set memory allocation policy for portion of this 8212 * segment 8213 */ 8214 8215 /* 8216 * Align address and length of advice to page 8217 * boundaries for large pages 8218 */ 8219 if (seg->s_szc != 0) { 8220 size_t pgsz; 8221 8222 pgsz = page_get_pagesize(seg->s_szc); 8223 addr = (caddr_t)P2ALIGN((uintptr_t)addr, 
pgsz); 8224 len = P2ROUNDUP(len, pgsz); 8225 } 8226 8227 /* 8228 * Check to see whether policy is set already 8229 */ 8230 policy = lgrp_madv_to_policy(behav, len, svd->type); 8231 8232 anon_index = svd->anon_index + page; 8233 off = svd->offset + (uintptr_t)(addr - seg->s_base); 8234 8235 if (svd->type == MAP_SHARED) 8236 already_set = lgrp_shm_policy_set(policy, amp, 8237 anon_index, vp, off, len); 8238 else 8239 already_set = 8240 (policy == svd->policy_info.mem_policy); 8241 8242 /* 8243 * If policy set already and it shouldn't be reapplied, 8244 * don't do anything. 8245 */ 8246 if (already_set && 8247 !LGRP_MEM_POLICY_REAPPLICABLE(policy)) 8248 break; 8249 8250 /* 8251 * For private memory, need writers lock on 8252 * address space because the segment may be 8253 * split or concatenated when changing policy 8254 */ 8255 if (svd->type == MAP_PRIVATE && 8256 AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) { 8257 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8258 return (IE_RETRY); 8259 } 8260 8261 /* 8262 * Mark any existing pages in given range for 8263 * migration 8264 */ 8265 page_mark_migrate(seg, addr, len, amp, svd->anon_index, 8266 vp, svd->offset, 1); 8267 8268 /* 8269 * Don't need to try to split or concatenate 8270 * segments, since policy is same or this is a shared 8271 * memory segment 8272 */ 8273 if (already_set || svd->type == MAP_SHARED) 8274 break; 8275 8276 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 8277 ASSERT(svd->amp == NULL); 8278 ASSERT(svd->tr_state == SEGVN_TR_OFF); 8279 ASSERT(svd->softlockcnt == 0); 8280 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 8281 HAT_REGION_TEXT); 8282 svd->rcookie = HAT_INVALID_REGION_COOKIE; 8283 } 8284 8285 /* 8286 * Split off new segment if advice only applies to a 8287 * portion of existing segment starting in middle 8288 */ 8289 new_seg = NULL; 8290 eaddr = addr + len; 8291 oldeaddr = seg->s_base + seg->s_size; 8292 if (addr > seg->s_base) { 8293 /* 8294 * Must flush I/O page cache 8295 * before splitting segment 8296 */ 8297 if (svd->softlockcnt > 0) 8298 segvn_purge(seg); 8299 8300 /* 8301 * Split segment and return IE_REATTACH to tell 8302 * as_ctl() that current segment changed 8303 */ 8304 new_seg = segvn_split_seg(seg, addr); 8305 new_svd = (struct segvn_data *)new_seg->s_data; 8306 err = IE_REATTACH; 8307 8308 /* 8309 * If new segment ends where old one 8310 * did, try to concatenate the new 8311 * segment with next one. 8312 */ 8313 if (eaddr == oldeaddr) { 8314 /* 8315 * Set policy for new segment 8316 */ 8317 (void) lgrp_privm_policy_set(policy, 8318 &new_svd->policy_info, 8319 new_seg->s_size); 8320 8321 next = AS_SEGNEXT(new_seg->s_as, 8322 new_seg); 8323 8324 if (next && 8325 next->s_ops == &segvn_ops && 8326 eaddr == next->s_base) 8327 (void) segvn_concat(new_seg, 8328 next, 1); 8329 } 8330 } 8331 8332 /* 8333 * Split off end of existing segment if advice only 8334 * applies to a portion of segment ending before 8335 * end of the existing segment 8336 */ 8337 if (eaddr < oldeaddr) { 8338 /* 8339 * Must flush I/O page cache 8340 * before splitting segment 8341 */ 8342 if (svd->softlockcnt > 0) 8343 segvn_purge(seg); 8344 8345 /* 8346 * If beginning of old segment was already 8347 * split off, use new segment to split end off 8348 * from. 
8349 */ 8350 if (new_seg != NULL && new_seg != seg) { 8351 /* 8352 * Split segment 8353 */ 8354 (void) segvn_split_seg(new_seg, eaddr); 8355 8356 /* 8357 * Set policy for new segment 8358 */ 8359 (void) lgrp_privm_policy_set(policy, 8360 &new_svd->policy_info, 8361 new_seg->s_size); 8362 } else { 8363 /* 8364 * Split segment and return IE_REATTACH 8365 * to tell as_ctl() that current 8366 * segment changed 8367 */ 8368 (void) segvn_split_seg(seg, eaddr); 8369 err = IE_REATTACH; 8370 8371 (void) lgrp_privm_policy_set(policy, 8372 &svd->policy_info, seg->s_size); 8373 8374 /* 8375 * If new segment starts where old one 8376 * did, try to concatenate it with 8377 * previous segment. 8378 */ 8379 if (addr == seg->s_base) { 8380 prev = AS_SEGPREV(seg->s_as, 8381 seg); 8382 8383 /* 8384 * Drop lock for private data 8385 * of current segment before 8386 * concatenating (deleting) it 8387 */ 8388 if (prev && 8389 prev->s_ops == 8390 &segvn_ops && 8391 addr == prev->s_base + 8392 prev->s_size) { 8393 SEGVN_LOCK_EXIT( 8394 seg->s_as, 8395 &svd->lock); 8396 (void) segvn_concat( 8397 prev, seg, 1); 8398 return (err); 8399 } 8400 } 8401 } 8402 } 8403 break; 8404 case MADV_SEQUENTIAL: 8405 ASSERT(seg->s_szc == 0); 8406 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 8407 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD); 8408 /* FALLTHROUGH */ 8409 case MADV_NORMAL: 8410 case MADV_RANDOM: 8411 bvpp = &svd->vpage[page]; 8412 evpp = &svd->vpage[page + (len >> PAGESHIFT)]; 8413 for (; bvpp < evpp; bvpp++) 8414 VPP_SETADVICE(bvpp, behav); 8415 svd->advice = MADV_NORMAL; 8416 break; 8417 case MADV_WILLNEED: /* handled in memcntl */ 8418 case MADV_DONTNEED: /* handled in memcntl */ 8419 case MADV_FREE: /* handled above */ 8420 break; 8421 default: 8422 err = EINVAL; 8423 } 8424 } 8425 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8426 return (err); 8427 } 8428 8429 /* 8430 * Create a vpage structure for this seg. 8431 */ 8432 static void 8433 segvn_vpage(struct seg *seg) 8434 { 8435 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8436 struct vpage *vp, *evp; 8437 8438 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 8439 8440 /* 8441 * If no vpage structure exists, allocate one. Copy the protections 8442 * and the advice from the segment itself to the individual pages. 8443 */ 8444 if (svd->vpage == NULL) { 8445 svd->pageadvice = 1; 8446 svd->vpage = kmem_zalloc(seg_pages(seg) * sizeof (struct vpage), 8447 KM_SLEEP); 8448 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)]; 8449 for (vp = svd->vpage; vp < evp; vp++) { 8450 VPP_SETPROT(vp, svd->prot); 8451 VPP_SETADVICE(vp, svd->advice); 8452 } 8453 } 8454 } 8455 8456 /* 8457 * Dump the pages belonging to this segvn segment. 
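 * (Invoked from the crash dump code via the segment op vector: each
 * resident page found in the range is handed to dump_addpage() so it is
 * included in the dump, and dump_timeleft is refreshed as we go.)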
8458 */ 8459 static void 8460 segvn_dump(struct seg *seg) 8461 { 8462 struct segvn_data *svd; 8463 page_t *pp; 8464 struct anon_map *amp; 8465 ulong_t anon_index; 8466 struct vnode *vp; 8467 u_offset_t off, offset; 8468 pfn_t pfn; 8469 pgcnt_t page, npages; 8470 caddr_t addr; 8471 8472 npages = seg_pages(seg); 8473 svd = (struct segvn_data *)seg->s_data; 8474 vp = svd->vp; 8475 off = offset = svd->offset; 8476 addr = seg->s_base; 8477 8478 if ((amp = svd->amp) != NULL) { 8479 anon_index = svd->anon_index; 8480 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 8481 } 8482 8483 for (page = 0; page < npages; page++, offset += PAGESIZE) { 8484 struct anon *ap; 8485 int we_own_it = 0; 8486 8487 if (amp && (ap = anon_get_ptr(svd->amp->ahp, anon_index++))) { 8488 swap_xlate_nopanic(ap, &vp, &off); 8489 } else { 8490 vp = svd->vp; 8491 off = offset; 8492 } 8493 8494 /* 8495 * If pp == NULL, the page either does not exist 8496 * or is exclusively locked. So determine if it 8497 * exists before searching for it. 8498 */ 8499 8500 if ((pp = page_lookup_nowait(vp, off, SE_SHARED))) 8501 we_own_it = 1; 8502 else 8503 pp = page_exists(vp, off); 8504 8505 if (pp) { 8506 pfn = page_pptonum(pp); 8507 dump_addpage(seg->s_as, addr, pfn); 8508 if (we_own_it) 8509 page_unlock(pp); 8510 } 8511 addr += PAGESIZE; 8512 dump_timeleft = dump_timeout; 8513 } 8514 8515 if (amp != NULL) 8516 ANON_LOCK_EXIT(&amp->a_rwlock); 8517 } 8518 8519 #ifdef DEBUG 8520 static uint32_t segvn_pglock_mtbf = 0; 8521 #endif 8522 8523 #define PCACHE_SHWLIST ((page_t *)-2) 8524 #define NOPCACHE_SHWLIST ((page_t *)-1) 8525 8526 /* 8527 * Lock/Unlock anon pages over a given range. Return shadow list. This routine 8528 * uses global segment pcache to cache shadow lists (i.e. pp arrays) of pages 8529 * to avoid the overhead of per page locking, unlocking for subsequent IOs to 8530 * the same parts of the segment. Currently shadow list creation is only 8531 * supported for pure anon segments. MAP_PRIVATE segment pcache entries are 8532 * tagged with segment pointer, starting virtual address and length. This 8533 * approach for MAP_SHARED segments may add many pcache entries for the same 8534 * set of pages and lead to long hash chains that decrease pcache lookup 8535 * performance. To avoid this issue for shared segments shared anon map and 8536 * starting anon index are used for pcache entry tagging. This allows all 8537 * segments to share pcache entries for the same anon range and reduces pcache 8538 * chain's length as well as memory overhead from duplicate shadow lists and 8539 * pcache entries. 8540 * 8541 * softlockcnt field in segvn_data structure counts the number of F_SOFTLOCK'd 8542 * pages via segvn_fault() and pagelock'd pages via this routine. But pagelock 8543 * part of softlockcnt accounting is done differently for private and shared 8544 * segments. In private segment case softlock is only incremented when a new 8545 * shadow list is created but not when an existing one is found via 8546 * seg_plookup(). pcache entries have reference count incremented/decremented 8547 * by each seg_plookup()/seg_pinactive() operation. Only entries that have 0 8548 * reference count can be purged (and purging is needed before segment can be 8549 * freed). When a private segment pcache entry is purged segvn_reclaim() will 8550 * decrement softlockcnt.
Since in private segment case each of its pcache 8551 * entries only belongs to this segment we can expect that when 8552 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this 8553 * segment purge will succeed and softlockcnt will drop to 0. In shared 8554 * segment case reference count in pcache entry counts active locks from many 8555 * different segments so we can't expect segment purging to succeed even when 8556 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this 8557 * segment. To be able to determine when there're no pending pagelocks in 8558 * shared segment case we don't rely on purging to make softlockcnt drop to 0 8559 * but instead softlockcnt is incremented and decremented for every 8560 * segvn_pagelock(L_PAGELOCK/L_PAGEUNLOCK) call regardless if a new shadow 8561 * list was created or an existing one was found. When softlockcnt drops to 0 8562 * this segment no longer has any claims for pcached shadow lists and the 8563 * segment can be freed even if there're still active pcache entries 8564 * shared by this segment anon map. Shared segment pcache entries belong to 8565 * anon map and are typically removed when anon map is freed after all 8566 * processes destroy the segments that use this anon map. 8567 */ 8568 static int 8569 segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp, 8570 enum lock_type type, enum seg_rw rw) 8571 { 8572 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8573 size_t np; 8574 pgcnt_t adjustpages; 8575 pgcnt_t npages; 8576 ulong_t anon_index; 8577 uint_t protchk = (rw == S_READ) ? PROT_READ : PROT_WRITE; 8578 uint_t error; 8579 struct anon_map *amp; 8580 pgcnt_t anpgcnt; 8581 struct page **pplist, **pl, *pp; 8582 caddr_t a; 8583 size_t page; 8584 caddr_t lpgaddr, lpgeaddr; 8585 anon_sync_obj_t cookie; 8586 int anlock; 8587 struct anon_map *pamp; 8588 caddr_t paddr; 8589 seg_preclaim_cbfunc_t preclaim_callback; 8590 size_t pgsz; 8591 int use_pcache; 8592 size_t wlen; 8593 uint_t pflags = 0; 8594 int sftlck_sbase = 0; 8595 int sftlck_send = 0; 8596 8597 #ifdef DEBUG 8598 if (type == L_PAGELOCK && segvn_pglock_mtbf) { 8599 hrtime_t ts = gethrtime(); 8600 if ((ts % segvn_pglock_mtbf) == 0) { 8601 return (ENOTSUP); 8602 } 8603 if ((ts % segvn_pglock_mtbf) == 1) { 8604 return (EFAULT); 8605 } 8606 } 8607 #endif 8608 8609 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_START, 8610 "segvn_pagelock: start seg %p addr %p", seg, addr); 8611 8612 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 8613 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK); 8614 8615 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 8616 8617 /* 8618 * for now we only support pagelock to anon memory. We would have to 8619 * check protections for vnode objects and call into the vnode driver. 8620 * That's too much for a fast path. Let the fault entry point handle 8621 * it. 
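 *
 * For context, kernel callers reach this entry point through the as
 * layer when preparing user buffers for I/O. A rough sketch of that
 * bracketing pattern (error handling omitted; as, uaddr and ulen stand
 * for the target address space, user address and length):
 *
 *	struct page **pplist;
 *
 *	if (as_pagelock(as, &pplist, uaddr, ulen, S_WRITE) == 0) {
 *		... do the transfer against the locked pages ...
 *		as_pageunlock(as, pplist, uaddr, ulen, S_WRITE);
 *	}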
8622 */ 8623 if (svd->vp != NULL) { 8624 if (type == L_PAGELOCK) { 8625 error = ENOTSUP; 8626 goto out; 8627 } 8628 panic("segvn_pagelock(L_PAGEUNLOCK): vp != NULL"); 8629 } 8630 if ((amp = svd->amp) == NULL) { 8631 if (type == L_PAGELOCK) { 8632 error = EFAULT; 8633 goto out; 8634 } 8635 panic("segvn_pagelock(L_PAGEUNLOCK): amp == NULL"); 8636 } 8637 if (rw != S_READ && rw != S_WRITE) { 8638 if (type == L_PAGELOCK) { 8639 error = ENOTSUP; 8640 goto out; 8641 } 8642 panic("segvn_pagelock(L_PAGEUNLOCK): bad rw"); 8643 } 8644 8645 if (seg->s_szc != 0) { 8646 /* 8647 * We are adjusting the pagelock region to the large page size 8648 * boundary because the unlocked part of a large page cannot 8649 * be freed anyway unless all constituent pages of a large 8650 * page are locked. Bigger regions reduce pcache chain length 8651 * and improve lookup performance. The tradeoff is that the 8652 * very first segvn_pagelock() call for a given page is more 8653 * expensive if only 1 page_t is needed for IO. This is only 8654 * an issue if pcache entry doesn't get reused by several 8655 * subsequent calls. We optimize here for the case when pcache 8656 * is heavily used by repeated IOs to the same address range. 8657 * 8658 * Note segment's page size cannot change while we are holding 8659 * as lock. And then it cannot change while softlockcnt is 8660 * not 0. This will allow us to correctly recalculate large 8661 * page size region for the matching pageunlock/reclaim call 8662 * since as_pageunlock() caller must always match 8663 * as_pagelock() call's addr and len. 8664 * 8665 * For pageunlock *ppp points to the pointer of page_t that 8666 * corresponds to the real unadjusted start address. Similar 8667 * for pagelock *ppp must point to the pointer of page_t that 8668 * corresponds to the real unadjusted start address. 8669 */ 8670 pgsz = page_get_pagesize(seg->s_szc); 8671 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 8672 adjustpages = btop((uintptr_t)(addr - lpgaddr)); 8673 } else if (len < segvn_pglock_comb_thrshld) { 8674 lpgaddr = addr; 8675 lpgeaddr = addr + len; 8676 adjustpages = 0; 8677 pgsz = PAGESIZE; 8678 } else { 8679 /* 8680 * Align the address range of large enough requests to allow 8681 * combining of different shadow lists into 1 to reduce memory 8682 * overhead from potentially overlapping large shadow lists 8683 * (worst case is we have a 1MB IO into buffers with start 8684 * addresses separated by 4K). Alignment is only possible if 8685 * padded chunks have sufficient access permissions. Note 8686 * permissions won't change between L_PAGELOCK and 8687 * L_PAGEUNLOCK calls since non 0 softlockcnt will force 8688 * segvn_setprot() to wait until softlockcnt drops to 0. This 8689 * allows us to determine in L_PAGEUNLOCK the same range we 8690 * computed in L_PAGELOCK. 8691 * 8692 * If alignment is limited by segment ends set 8693 * sftlck_sbase/sftlck_send flags. In L_PAGELOCK case when 8694 * these flags are set bump softlockcnt_sbase/softlockcnt_send 8695 * per segment counters. In L_PAGEUNLOCK case decrease 8696 * softlockcnt_sbase/softlockcnt_send counters if 8697 * sftlck_sbase/sftlck_send flags are set. When 8698 * softlockcnt_sbase/softlockcnt_send are non 0 8699 * segvn_concat()/segvn_extend_prev()/segvn_extend_next() 8700 * won't merge the segments. 
This restriction combined with 8701 * restriction on segment unmapping and splitting for segments 8702 * that have non 0 softlockcnt allows L_PAGEUNLOCK to 8703 * correctly determine the same range that was previously 8704 * locked by matching L_PAGELOCK. 8705 */ 8706 pflags = SEGP_PSHIFT | (segvn_pglock_comb_bshift << 16); 8707 pgsz = PAGESIZE; 8708 if (svd->type == MAP_PRIVATE) { 8709 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)addr, 8710 segvn_pglock_comb_balign); 8711 if (lpgaddr < seg->s_base) { 8712 lpgaddr = seg->s_base; 8713 sftlck_sbase = 1; 8714 } 8715 } else { 8716 ulong_t aix = svd->anon_index + seg_page(seg, addr); 8717 ulong_t aaix = P2ALIGN(aix, segvn_pglock_comb_palign); 8718 if (aaix < svd->anon_index) { 8719 lpgaddr = seg->s_base; 8720 sftlck_sbase = 1; 8721 } else { 8722 lpgaddr = addr - ptob(aix - aaix); 8723 ASSERT(lpgaddr >= seg->s_base); 8724 } 8725 } 8726 if (svd->pageprot && lpgaddr != addr) { 8727 struct vpage *vp = &svd->vpage[seg_page(seg, lpgaddr)]; 8728 struct vpage *evp = &svd->vpage[seg_page(seg, addr)]; 8729 while (vp < evp) { 8730 if ((VPP_PROT(vp) & protchk) == 0) { 8731 break; 8732 } 8733 vp++; 8734 } 8735 if (vp < evp) { 8736 lpgaddr = addr; 8737 pflags = 0; 8738 } 8739 } 8740 lpgeaddr = addr + len; 8741 if (pflags) { 8742 if (svd->type == MAP_PRIVATE) { 8743 lpgeaddr = (caddr_t)P2ROUNDUP( 8744 (uintptr_t)lpgeaddr, 8745 segvn_pglock_comb_balign); 8746 } else { 8747 ulong_t aix = svd->anon_index + 8748 seg_page(seg, lpgeaddr); 8749 ulong_t aaix = P2ROUNDUP(aix, 8750 segvn_pglock_comb_palign); 8751 if (aaix < aix) { 8752 lpgeaddr = 0; 8753 } else { 8754 lpgeaddr += ptob(aaix - aix); 8755 } 8756 } 8757 if (lpgeaddr == 0 || 8758 lpgeaddr > seg->s_base + seg->s_size) { 8759 lpgeaddr = seg->s_base + seg->s_size; 8760 sftlck_send = 1; 8761 } 8762 } 8763 if (svd->pageprot && lpgeaddr != addr + len) { 8764 struct vpage *vp; 8765 struct vpage *evp; 8766 8767 vp = &svd->vpage[seg_page(seg, addr + len)]; 8768 evp = &svd->vpage[seg_page(seg, lpgeaddr)]; 8769 8770 while (vp < evp) { 8771 if ((VPP_PROT(vp) & protchk) == 0) { 8772 break; 8773 } 8774 vp++; 8775 } 8776 if (vp < evp) { 8777 lpgeaddr = addr + len; 8778 } 8779 } 8780 adjustpages = btop((uintptr_t)(addr - lpgaddr)); 8781 } 8782 8783 /* 8784 * For MAP_SHARED segments we create pcache entries tagged by amp and 8785 * anon index so that we can share pcache entries with other segments 8786 * that map this amp. For private segments pcache entries are tagged 8787 * with segment and virtual address. 8788 */ 8789 if (svd->type == MAP_SHARED) { 8790 pamp = amp; 8791 paddr = (caddr_t)((lpgaddr - seg->s_base) + 8792 ptob(svd->anon_index)); 8793 preclaim_callback = shamp_reclaim; 8794 } else { 8795 pamp = NULL; 8796 paddr = lpgaddr; 8797 preclaim_callback = segvn_reclaim; 8798 } 8799 8800 if (type == L_PAGEUNLOCK) { 8801 VM_STAT_ADD(segvnvmstats.pagelock[0]); 8802 8803 /* 8804 * update hat ref bits for /proc. We need to make sure 8805 * that threads tracing the ref and mod bits of the 8806 * address space get the right data. 8807 * Note: page ref and mod bits are updated at reclaim time 8808 */ 8809 if (seg->s_as->a_vbits) { 8810 for (a = addr; a < addr + len; a += PAGESIZE) { 8811 if (rw == S_WRITE) { 8812 hat_setstat(seg->s_as, a, 8813 PAGESIZE, P_REF | P_MOD); 8814 } else { 8815 hat_setstat(seg->s_as, a, 8816 PAGESIZE, P_REF); 8817 } 8818 } 8819 } 8820 8821 /* 8822 * Check the shadow list entry after the last page used in 8823 * this IO request. 
If it's NOPCACHE_SHWLIST the shadow list 8824 * was not inserted into pcache and is not large page 8825 * adjusted. In this case call reclaim callback directly and 8826 * don't adjust the shadow list start and size for large 8827 * pages. 8828 */ 8829 npages = btop(len); 8830 if ((*ppp)[npages] == NOPCACHE_SHWLIST) { 8831 void *ptag; 8832 if (pamp != NULL) { 8833 ASSERT(svd->type == MAP_SHARED); 8834 ptag = (void *)pamp; 8835 paddr = (caddr_t)((addr - seg->s_base) + 8836 ptob(svd->anon_index)); 8837 } else { 8838 ptag = (void *)seg; 8839 paddr = addr; 8840 } 8841 (*preclaim_callback)(ptag, paddr, len, *ppp, rw, 0); 8842 } else { 8843 ASSERT((*ppp)[npages] == PCACHE_SHWLIST || 8844 IS_SWAPFSVP((*ppp)[npages]->p_vnode)); 8845 len = lpgeaddr - lpgaddr; 8846 npages = btop(len); 8847 seg_pinactive(seg, pamp, paddr, len, 8848 *ppp - adjustpages, rw, pflags, preclaim_callback); 8849 } 8850 8851 if (pamp != NULL) { 8852 ASSERT(svd->type == MAP_SHARED); 8853 ASSERT(svd->softlockcnt >= npages); 8854 atomic_add_long((ulong_t *)&svd->softlockcnt, -npages); 8855 } 8856 8857 if (sftlck_sbase) { 8858 ASSERT(svd->softlockcnt_sbase > 0); 8859 atomic_add_long((ulong_t *)&svd->softlockcnt_sbase, -1); 8860 } 8861 if (sftlck_send) { 8862 ASSERT(svd->softlockcnt_send > 0); 8863 atomic_add_long((ulong_t *)&svd->softlockcnt_send, -1); 8864 } 8865 8866 /* 8867 * If someone is blocked while unmapping, we purge 8868 * segment page cache and thus reclaim pplist synchronously 8869 * without waiting for seg_pasync_thread. This speeds up 8870 * unmapping in cases where munmap(2) is called, while 8871 * raw async i/o is still in progress or where a thread 8872 * exits on data fault in a multithreaded application. 8873 */ 8874 if (AS_ISUNMAPWAIT(seg->s_as)) { 8875 if (svd->softlockcnt == 0) { 8876 mutex_enter(&seg->s_as->a_contents); 8877 if (AS_ISUNMAPWAIT(seg->s_as)) { 8878 AS_CLRUNMAPWAIT(seg->s_as); 8879 cv_broadcast(&seg->s_as->a_cv); 8880 } 8881 mutex_exit(&seg->s_as->a_contents); 8882 } else if (pamp == NULL) { 8883 /* 8884 * softlockcnt is not 0 and this is a 8885 * MAP_PRIVATE segment. Try to purge its 8886 * pcache entries to reduce softlockcnt. 8887 * If it drops to 0 segvn_reclaim() 8888 * will wake up a thread waiting on 8889 * unmapwait flag. 8890 * 8891 * We don't purge MAP_SHARED segments with non 8892 * 0 softlockcnt since IO is still in progress 8893 * for such segments. 8894 */ 8895 ASSERT(svd->type == MAP_PRIVATE); 8896 segvn_purge(seg); 8897 } 8898 } 8899 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8900 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_UNLOCK_END, 8901 "segvn_pagelock: unlock seg %p addr %p", seg, addr); 8902 return (0); 8903 } 8904 8905 /* The L_PAGELOCK case ... */ 8906 8907 VM_STAT_ADD(segvnvmstats.pagelock[1]); 8908 8909 /* 8910 * For MAP_SHARED segments we have to check protections before 8911 * seg_plookup() since pcache entries may be shared by many segments 8912 * with potentially different page protections. 
8913 */ 8914 if (pamp != NULL) { 8915 ASSERT(svd->type == MAP_SHARED); 8916 if (svd->pageprot == 0) { 8917 if ((svd->prot & protchk) == 0) { 8918 error = EACCES; 8919 goto out; 8920 } 8921 } else { 8922 /* 8923 * check page protections 8924 */ 8925 caddr_t ea; 8926 8927 if (seg->s_szc) { 8928 a = lpgaddr; 8929 ea = lpgeaddr; 8930 } else { 8931 a = addr; 8932 ea = addr + len; 8933 } 8934 for (; a < ea; a += pgsz) { 8935 struct vpage *vp; 8936 8937 ASSERT(seg->s_szc == 0 || 8938 sameprot(seg, a, pgsz)); 8939 vp = &svd->vpage[seg_page(seg, a)]; 8940 if ((VPP_PROT(vp) & protchk) == 0) { 8941 error = EACCES; 8942 goto out; 8943 } 8944 } 8945 } 8946 } 8947 8948 /* 8949 * try to find pages in segment page cache 8950 */ 8951 pplist = seg_plookup(seg, pamp, paddr, lpgeaddr - lpgaddr, rw, pflags); 8952 if (pplist != NULL) { 8953 if (pamp != NULL) { 8954 npages = btop((uintptr_t)(lpgeaddr - lpgaddr)); 8955 ASSERT(svd->type == MAP_SHARED); 8956 atomic_add_long((ulong_t *)&svd->softlockcnt, 8957 npages); 8958 } 8959 if (sftlck_sbase) { 8960 atomic_add_long((ulong_t *)&svd->softlockcnt_sbase, 1); 8961 } 8962 if (sftlck_send) { 8963 atomic_add_long((ulong_t *)&svd->softlockcnt_send, 1); 8964 } 8965 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8966 *ppp = pplist + adjustpages; 8967 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_HIT_END, 8968 "segvn_pagelock: cache hit seg %p addr %p", seg, addr); 8969 return (0); 8970 } 8971 8972 /* 8973 * For MAP_SHARED segments we already verified above that segment 8974 * protections allow this pagelock operation. 8975 */ 8976 if (pamp == NULL) { 8977 ASSERT(svd->type == MAP_PRIVATE); 8978 if (svd->pageprot == 0) { 8979 if ((svd->prot & protchk) == 0) { 8980 error = EACCES; 8981 goto out; 8982 } 8983 if (svd->prot & PROT_WRITE) { 8984 wlen = lpgeaddr - lpgaddr; 8985 } else { 8986 wlen = 0; 8987 ASSERT(rw == S_READ); 8988 } 8989 } else { 8990 int wcont = 1; 8991 /* 8992 * check page protections 8993 */ 8994 for (a = lpgaddr, wlen = 0; a < lpgeaddr; a += pgsz) { 8995 struct vpage *vp; 8996 8997 ASSERT(seg->s_szc == 0 || 8998 sameprot(seg, a, pgsz)); 8999 vp = &svd->vpage[seg_page(seg, a)]; 9000 if ((VPP_PROT(vp) & protchk) == 0) { 9001 error = EACCES; 9002 goto out; 9003 } 9004 if (wcont && (VPP_PROT(vp) & PROT_WRITE)) { 9005 wlen += pgsz; 9006 } else { 9007 wcont = 0; 9008 ASSERT(rw == S_READ); 9009 } 9010 } 9011 } 9012 ASSERT(rw == S_READ || wlen == lpgeaddr - lpgaddr); 9013 ASSERT(rw == S_WRITE || wlen <= lpgeaddr - lpgaddr); 9014 } 9015 9016 /* 9017 * Only build large page adjusted shadow list if we expect to insert 9018 * it into pcache. For large enough pages it's a big overhead to 9019 * create a shadow list of the entire large page. But this overhead 9020 * should be amortized over repeated pcache hits on subsequent reuse 9021 * of this shadow list (IO into any range within this shadow list will 9022 * find it in pcache since we large page align the request for pcache 9023 * lookups). pcache performance is improved with bigger shadow lists 9024 * as it reduces the time to pcache the entire big segment and reduces 9025 * pcache chain length. 9026 */ 9027 if (seg_pinsert_check(seg, pamp, paddr, 9028 lpgeaddr - lpgaddr, pflags) == SEGP_SUCCESS) { 9029 addr = lpgaddr; 9030 len = lpgeaddr - lpgaddr; 9031 use_pcache = 1; 9032 } else { 9033 use_pcache = 0; 9034 /* 9035 * Since this entry will not be inserted into the pcache, we 9036 * will not do any adjustments to the starting address or 9037 * size of the memory to be locked. 
9038 */ 9039 adjustpages = 0; 9040 } 9041 npages = btop(len); 9042 9043 pplist = kmem_alloc(sizeof (page_t *) * (npages + 1), KM_SLEEP); 9044 pl = pplist; 9045 *ppp = pplist + adjustpages; 9046 /* 9047 * If use_pcache is 0 this shadow list is not large page adjusted. 9048 * Record this info in the last entry of shadow array so that 9049 * L_PAGEUNLOCK can determine if it should large page adjust the 9050 * address range to find the real range that was locked. 9051 */ 9052 pl[npages] = use_pcache ? PCACHE_SHWLIST : NOPCACHE_SHWLIST; 9053 9054 page = seg_page(seg, addr); 9055 anon_index = svd->anon_index + page; 9056 9057 anlock = 0; 9058 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 9059 ASSERT(amp->a_szc >= seg->s_szc); 9060 anpgcnt = page_get_pagecnt(amp->a_szc); 9061 for (a = addr; a < addr + len; a += PAGESIZE, anon_index++) { 9062 struct anon *ap; 9063 struct vnode *vp; 9064 u_offset_t off; 9065 9066 /* 9067 * Lock and unlock anon array only once per large page. 9068 * anon_array_enter() locks the root anon slot according to 9069 * a_szc which can't change while anon map is locked. We lock 9070 * anon the first time through this loop and each time we 9071 * reach anon index that corresponds to a root of a large 9072 * page. 9073 */ 9074 if (a == addr || P2PHASE(anon_index, anpgcnt) == 0) { 9075 ASSERT(anlock == 0); 9076 anon_array_enter(amp, anon_index, &cookie); 9077 anlock = 1; 9078 } 9079 ap = anon_get_ptr(amp->ahp, anon_index); 9080 9081 /* 9082 * We must never use seg_pcache for COW pages 9083 * because we might end up with original page still 9084 * lying in seg_pcache even after private page is 9085 * created. This leads to data corruption as 9086 * aio_write refers to the page still in cache 9087 * while all other accesses refer to the private 9088 * page. 9089 */ 9090 if (ap == NULL || ap->an_refcnt != 1) { 9091 struct vpage *vpage; 9092 9093 if (seg->s_szc) { 9094 error = EFAULT; 9095 break; 9096 } 9097 if (svd->vpage != NULL) { 9098 vpage = &svd->vpage[seg_page(seg, a)]; 9099 } else { 9100 vpage = NULL; 9101 } 9102 ASSERT(anlock); 9103 anon_array_exit(&cookie); 9104 anlock = 0; 9105 pp = NULL; 9106 error = segvn_faultpage(seg->s_as->a_hat, seg, a, 0, 9107 vpage, &pp, 0, F_INVAL, rw, 1); 9108 if (error) { 9109 error = fc_decode(error); 9110 break; 9111 } 9112 anon_array_enter(amp, anon_index, &cookie); 9113 anlock = 1; 9114 ap = anon_get_ptr(amp->ahp, anon_index); 9115 if (ap == NULL || ap->an_refcnt != 1) { 9116 error = EFAULT; 9117 break; 9118 } 9119 } 9120 swap_xlate(ap, &vp, &off); 9121 pp = page_lookup_nowait(vp, off, SE_SHARED); 9122 if (pp == NULL) { 9123 error = EFAULT; 9124 break; 9125 } 9126 if (ap->an_pvp != NULL) { 9127 anon_swap_free(ap, pp); 9128 } 9129 /* 9130 * Unlock anon if this is the last slot in a large page. 
9131 */ 9132 if (P2PHASE(anon_index, anpgcnt) == anpgcnt - 1) { 9133 ASSERT(anlock); 9134 anon_array_exit(&cookie); 9135 anlock = 0; 9136 } 9137 *pplist++ = pp; 9138 } 9139 if (anlock) { /* Ensure the lock is dropped */ 9140 anon_array_exit(&cookie); 9141 } 9142 ANON_LOCK_EXIT(&->a_rwlock); 9143 9144 if (a >= addr + len) { 9145 atomic_add_long((ulong_t *)&svd->softlockcnt, npages); 9146 if (pamp != NULL) { 9147 ASSERT(svd->type == MAP_SHARED); 9148 atomic_add_long((ulong_t *)&pamp->a_softlockcnt, 9149 npages); 9150 wlen = len; 9151 } 9152 if (sftlck_sbase) { 9153 atomic_add_long((ulong_t *)&svd->softlockcnt_sbase, 1); 9154 } 9155 if (sftlck_send) { 9156 atomic_add_long((ulong_t *)&svd->softlockcnt_send, 1); 9157 } 9158 if (use_pcache) { 9159 (void) seg_pinsert(seg, pamp, paddr, len, wlen, pl, 9160 rw, pflags, preclaim_callback); 9161 } 9162 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9163 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_FILL_END, 9164 "segvn_pagelock: cache fill seg %p addr %p", seg, addr); 9165 return (0); 9166 } 9167 9168 pplist = pl; 9169 np = ((uintptr_t)(a - addr)) >> PAGESHIFT; 9170 while (np > (uint_t)0) { 9171 ASSERT(PAGE_LOCKED(*pplist)); 9172 page_unlock(*pplist); 9173 np--; 9174 pplist++; 9175 } 9176 kmem_free(pl, sizeof (page_t *) * (npages + 1)); 9177 out: 9178 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9179 *ppp = NULL; 9180 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END, 9181 "segvn_pagelock: cache miss seg %p addr %p", seg, addr); 9182 return (error); 9183 } 9184 9185 /* 9186 * purge any cached pages in the I/O page cache 9187 */ 9188 static void 9189 segvn_purge(struct seg *seg) 9190 { 9191 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9192 9193 /* 9194 * pcache is only used by pure anon segments. 9195 */ 9196 if (svd->amp == NULL || svd->vp != NULL) { 9197 return; 9198 } 9199 9200 /* 9201 * For MAP_SHARED segments non 0 segment's softlockcnt means 9202 * active IO is still in progress via this segment. So we only 9203 * purge MAP_SHARED segments when their softlockcnt is 0. 9204 */ 9205 if (svd->type == MAP_PRIVATE) { 9206 if (svd->softlockcnt) { 9207 seg_ppurge(seg, NULL, 0); 9208 } 9209 } else if (svd->softlockcnt == 0 && svd->amp->a_softlockcnt != 0) { 9210 seg_ppurge(seg, svd->amp, 0); 9211 } 9212 } 9213 9214 /* 9215 * If async argument is not 0 we are called from pcache async thread and don't 9216 * hold AS lock. 9217 */ 9218 9219 /*ARGSUSED*/ 9220 static int 9221 segvn_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist, 9222 enum seg_rw rw, int async) 9223 { 9224 struct seg *seg = (struct seg *)ptag; 9225 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9226 pgcnt_t np, npages; 9227 struct page **pl; 9228 9229 npages = np = btop(len); 9230 ASSERT(npages); 9231 9232 ASSERT(svd->vp == NULL && svd->amp != NULL); 9233 ASSERT(svd->softlockcnt >= npages); 9234 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 9235 9236 pl = pplist; 9237 9238 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST); 9239 ASSERT(!async || pl[np] == PCACHE_SHWLIST); 9240 9241 while (np > (uint_t)0) { 9242 if (rw == S_WRITE) { 9243 hat_setrefmod(*pplist); 9244 } else { 9245 hat_setref(*pplist); 9246 } 9247 page_unlock(*pplist); 9248 np--; 9249 pplist++; 9250 } 9251 9252 kmem_free(pl, sizeof (page_t *) * (npages + 1)); 9253 9254 /* 9255 * If we are pcache async thread we don't hold AS lock. This means if 9256 * softlockcnt drops to 0 after the decrement below address space may 9257 * get freed. 
We can't allow it since after softlock decrement to 0 we 9258 * still need to access as structure for possible wakeup of unmap 9259 * waiters. To prevent the disappearance of as we take this segment's 9260 * segfree_syncmtx. segvn_free() also takes this mutex as a barrier to 9261 * make sure this routine completes before segment is freed. 9262 * 9263 * The second complication we have to deal with in async case is a 9264 * possibility of missed wake up of unmap wait thread. When we don't 9265 * hold as lock here we may take a_contents lock before unmap wait 9266 * thread that was first to see softlockcnt was still not 0. As a 9267 * result we'll fail to wake up an unmap wait thread. To avoid this 9268 * race we set nounmapwait flag in as structure if we drop softlockcnt 9269 * to 0 when we were called by pcache async thread. unmapwait thread 9270 * will not block if this flag is set. 9271 */ 9272 if (async) { 9273 mutex_enter(&svd->segfree_syncmtx); 9274 } 9275 9276 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -npages)) { 9277 if (async || AS_ISUNMAPWAIT(seg->s_as)) { 9278 mutex_enter(&seg->s_as->a_contents); 9279 if (async) { 9280 AS_SETNOUNMAPWAIT(seg->s_as); 9281 } 9282 if (AS_ISUNMAPWAIT(seg->s_as)) { 9283 AS_CLRUNMAPWAIT(seg->s_as); 9284 cv_broadcast(&seg->s_as->a_cv); 9285 } 9286 mutex_exit(&seg->s_as->a_contents); 9287 } 9288 } 9289 9290 if (async) { 9291 mutex_exit(&svd->segfree_syncmtx); 9292 } 9293 return (0); 9294 } 9295 9296 /*ARGSUSED*/ 9297 static int 9298 shamp_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist, 9299 enum seg_rw rw, int async) 9300 { 9301 amp_t *amp = (amp_t *)ptag; 9302 pgcnt_t np, npages; 9303 struct page **pl; 9304 9305 npages = np = btop(len); 9306 ASSERT(npages); 9307 ASSERT(amp->a_softlockcnt >= npages); 9308 9309 pl = pplist; 9310 9311 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST); 9312 ASSERT(!async || pl[np] == PCACHE_SHWLIST); 9313 9314 while (np > (uint_t)0) { 9315 if (rw == S_WRITE) { 9316 hat_setrefmod(*pplist); 9317 } else { 9318 hat_setref(*pplist); 9319 } 9320 page_unlock(*pplist); 9321 np--; 9322 pplist++; 9323 } 9324 9325 kmem_free(pl, sizeof (page_t *) * (npages + 1)); 9326 9327 /* 9328 * If somebody sleeps in anonmap_purge() wake them up if a_softlockcnt 9329 * drops to 0. anon map can't be freed until a_softlockcnt drops to 0 9330 * and anonmap_purge() acquires a_purgemtx. 9331 */ 9332 mutex_enter(&amp->a_purgemtx); 9333 if (!atomic_add_long_nv((ulong_t *)&amp->a_softlockcnt, -npages) && 9334 amp->a_purgewait) { 9335 amp->a_purgewait = 0; 9336 cv_broadcast(&amp->a_purgecv); 9337 } 9338 mutex_exit(&amp->a_purgemtx); 9339 return (0); 9340 } 9341 9342 /* 9343 * get a memory ID for an addr in a given segment 9344 * 9345 * XXX only creates PAGESIZE pages if anon slots are not initialized. 9346 * At fault time they will be relocated into larger pages.
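 *
 * The returned memid names the backing memory independently of this
 * mapping: (as, vaddr) for MAP_PRIVATE segments, (vp, file offset) for
 * MAP_SHARED vnode segments and (anon slot, page offset) for MAP_SHARED
 * anonymous segments.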
9347 */ 9348 static int 9349 segvn_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp) 9350 { 9351 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9352 struct anon *ap = NULL; 9353 ulong_t anon_index; 9354 struct anon_map *amp; 9355 anon_sync_obj_t cookie; 9356 9357 if (svd->type == MAP_PRIVATE) { 9358 memidp->val[0] = (uintptr_t)seg->s_as; 9359 memidp->val[1] = (uintptr_t)addr; 9360 return (0); 9361 } 9362 9363 if (svd->type == MAP_SHARED) { 9364 if (svd->vp) { 9365 memidp->val[0] = (uintptr_t)svd->vp; 9366 memidp->val[1] = (u_longlong_t)svd->offset + 9367 (uintptr_t)(addr - seg->s_base); 9368 return (0); 9369 } else { 9370 9371 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 9372 if ((amp = svd->amp) != NULL) { 9373 anon_index = svd->anon_index + 9374 seg_page(seg, addr); 9375 } 9376 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9377 9378 ASSERT(amp != NULL); 9379 9380 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 9381 anon_array_enter(amp, anon_index, &cookie); 9382 ap = anon_get_ptr(amp->ahp, anon_index); 9383 if (ap == NULL) { 9384 page_t *pp; 9385 9386 pp = anon_zero(seg, addr, &ap, svd->cred); 9387 if (pp == NULL) { 9388 anon_array_exit(&cookie); 9389 ANON_LOCK_EXIT(&amp->a_rwlock); 9390 return (ENOMEM); 9391 } 9392 ASSERT(anon_get_ptr(amp->ahp, anon_index) 9393 == NULL); 9394 (void) anon_set_ptr(amp->ahp, anon_index, 9395 ap, ANON_SLEEP); 9396 page_unlock(pp); 9397 } 9398 9399 anon_array_exit(&cookie); 9400 ANON_LOCK_EXIT(&amp->a_rwlock); 9401 9402 memidp->val[0] = (uintptr_t)ap; 9403 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET; 9404 return (0); 9405 } 9406 } 9407 return (EINVAL); 9408 } 9409 9410 static int 9411 sameprot(struct seg *seg, caddr_t a, size_t len) 9412 { 9413 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9414 struct vpage *vpage; 9415 spgcnt_t pages = btop(len); 9416 uint_t prot; 9417 9418 if (svd->pageprot == 0) 9419 return (1); 9420 9421 ASSERT(svd->vpage != NULL); 9422 9423 vpage = &svd->vpage[seg_page(seg, a)]; 9424 prot = VPP_PROT(vpage); 9425 vpage++; 9426 pages--; 9427 while (pages-- > 0) { 9428 if (prot != VPP_PROT(vpage)) 9429 return (0); 9430 vpage++; 9431 } 9432 return (1); 9433 } 9434 9435 /* 9436 * Get memory allocation policy info for specified address in given segment 9437 */ 9438 static lgrp_mem_policy_info_t * 9439 segvn_getpolicy(struct seg *seg, caddr_t addr) 9440 { 9441 struct anon_map *amp; 9442 ulong_t anon_index; 9443 lgrp_mem_policy_info_t *policy_info; 9444 struct segvn_data *svn_data; 9445 u_offset_t vn_off; 9446 vnode_t *vp; 9447 9448 ASSERT(seg != NULL); 9449 9450 svn_data = (struct segvn_data *)seg->s_data; 9451 if (svn_data == NULL) 9452 return (NULL); 9453 9454 /* 9455 * Get policy info for private or shared memory 9456 */ 9457 if (svn_data->type != MAP_SHARED) { 9458 if (svn_data->tr_state != SEGVN_TR_ON) { 9459 policy_info = &svn_data->policy_info; 9460 } else { 9461 policy_info = &svn_data->tr_policy_info; 9462 ASSERT(policy_info->mem_policy == 9463 LGRP_MEM_POLICY_NEXT_SEG); 9464 } 9465 } else { 9466 amp = svn_data->amp; 9467 anon_index = svn_data->anon_index + seg_page(seg, addr); 9468 vp = svn_data->vp; 9469 vn_off = svn_data->offset + (uintptr_t)(addr - seg->s_base); 9470 policy_info = lgrp_shm_policy_get(amp, anon_index, vp, vn_off); 9471 } 9472 9473 return (policy_info); 9474 } 9475 9476 /*ARGSUSED*/ 9477 static int 9478 segvn_capable(struct seg *seg, segcapability_t capability) 9479 { 9480 return (0); 9481 } 9482 9483 /* 9484 * Bind text vnode segment to an amp.
If we bind successfully mappings will be 9485 * established to per vnode mapping per lgroup amp pages instead of to vnode 9486 * pages. There's one amp per vnode text mapping per lgroup. Many processes 9487 * may share the same text replication amp. If a suitable amp doesn't already 9488 * exist in svntr hash table create a new one. We may fail to bind to amp if 9489 * segment is not eligible for text replication. Code below first checks for 9490 * these conditions. If binding is successful segment tr_state is set to on 9491 * and svd->amp points to the amp to use. Otherwise tr_state is set to off and 9492 * svd->amp remains as NULL. 9493 */ 9494 static void 9495 segvn_textrepl(struct seg *seg) 9496 { 9497 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9498 vnode_t *vp = svd->vp; 9499 u_offset_t off = svd->offset; 9500 size_t size = seg->s_size; 9501 u_offset_t eoff = off + size; 9502 uint_t szc = seg->s_szc; 9503 ulong_t hash = SVNTR_HASH_FUNC(vp); 9504 svntr_t *svntrp; 9505 struct vattr va; 9506 proc_t *p = seg->s_as->a_proc; 9507 lgrp_id_t lgrp_id; 9508 lgrp_id_t olid; 9509 int first; 9510 struct anon_map *amp; 9511 9512 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 9513 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 9514 ASSERT(p != NULL); 9515 ASSERT(svd->tr_state == SEGVN_TR_INIT); 9516 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 9517 ASSERT(svd->flags & MAP_TEXT); 9518 ASSERT(svd->type == MAP_PRIVATE); 9519 ASSERT(vp != NULL && svd->amp == NULL); 9520 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE)); 9521 ASSERT(!(svd->flags & MAP_NORESERVE) && svd->swresv == 0); 9522 ASSERT(seg->s_as != &kas); 9523 ASSERT(off < eoff); 9524 ASSERT(svntr_hashtab != NULL); 9525 9526 /* 9527 * If numa optimizations are no longer desired bail out. 9528 */ 9529 if (!lgrp_optimizations()) { 9530 svd->tr_state = SEGVN_TR_OFF; 9531 return; 9532 } 9533 9534 /* 9535 * Avoid creating anon maps with size bigger than the file size. 9536 * If VOP_GETATTR() call fails bail out. 9537 */ 9538 va.va_mask = AT_SIZE | AT_MTIME | AT_CTIME; 9539 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL) != 0) { 9540 svd->tr_state = SEGVN_TR_OFF; 9541 SEGVN_TR_ADDSTAT(gaerr); 9542 return; 9543 } 9544 if (btopr(va.va_size) < btopr(eoff)) { 9545 svd->tr_state = SEGVN_TR_OFF; 9546 SEGVN_TR_ADDSTAT(overmap); 9547 return; 9548 } 9549 9550 /* 9551 * VVMEXEC may not be set yet if exec() prefaults text segment. Set 9552 * this flag now before vn_is_mapped(V_WRITE) so that MAP_SHARED 9553 * mapping that checks if trcache for this vnode needs to be 9554 * invalidated can't miss us. 9555 */ 9556 if (!(vp->v_flag & VVMEXEC)) { 9557 mutex_enter(&vp->v_lock); 9558 vp->v_flag |= VVMEXEC; 9559 mutex_exit(&vp->v_lock); 9560 } 9561 mutex_enter(&svntr_hashtab[hash].tr_lock); 9562 /* 9563 * Bail out if potentially MAP_SHARED writable mappings exist to this 9564 * vnode. We don't want to use old file contents from existing 9565 * replicas if this mapping was established after the original file 9566 * was changed. 
9567 */ 9568 if (vn_is_mapped(vp, V_WRITE)) { 9569 mutex_exit(&svntr_hashtab[hash].tr_lock); 9570 svd->tr_state = SEGVN_TR_OFF; 9571 SEGVN_TR_ADDSTAT(wrcnt); 9572 return; 9573 } 9574 svntrp = svntr_hashtab[hash].tr_head; 9575 for (; svntrp != NULL; svntrp = svntrp->tr_next) { 9576 ASSERT(svntrp->tr_refcnt != 0); 9577 if (svntrp->tr_vp != vp) { 9578 continue; 9579 } 9580 9581 /* 9582 * Bail out if the file or its attributes were changed after 9583 * this replication entry was created since we need to use the 9584 * latest file contents. Note that mtime test alone is not 9585 * sufficient because a user can explicitly change mtime via 9586 * utimes(2) interfaces back to the old value after modifiying 9587 * the file contents. To detect this case we also have to test 9588 * ctime which among other things records the time of the last 9589 * mtime change by utimes(2). ctime is not changed when the file 9590 * is only read or executed so we expect that typically existing 9591 * replication amp's can be used most of the time. 9592 */ 9593 if (!svntrp->tr_valid || 9594 svntrp->tr_mtime.tv_sec != va.va_mtime.tv_sec || 9595 svntrp->tr_mtime.tv_nsec != va.va_mtime.tv_nsec || 9596 svntrp->tr_ctime.tv_sec != va.va_ctime.tv_sec || 9597 svntrp->tr_ctime.tv_nsec != va.va_ctime.tv_nsec) { 9598 mutex_exit(&svntr_hashtab[hash].tr_lock); 9599 svd->tr_state = SEGVN_TR_OFF; 9600 SEGVN_TR_ADDSTAT(stale); 9601 return; 9602 } 9603 /* 9604 * if off, eoff and szc match current segment we found the 9605 * existing entry we can use. 9606 */ 9607 if (svntrp->tr_off == off && svntrp->tr_eoff == eoff && 9608 svntrp->tr_szc == szc) { 9609 break; 9610 } 9611 /* 9612 * Don't create different but overlapping in file offsets 9613 * entries to avoid replication of the same file pages more 9614 * than once per lgroup. 9615 */ 9616 if ((off >= svntrp->tr_off && off < svntrp->tr_eoff) || 9617 (eoff > svntrp->tr_off && eoff <= svntrp->tr_eoff)) { 9618 mutex_exit(&svntr_hashtab[hash].tr_lock); 9619 svd->tr_state = SEGVN_TR_OFF; 9620 SEGVN_TR_ADDSTAT(overlap); 9621 return; 9622 } 9623 } 9624 /* 9625 * If we didn't find existing entry create a new one. 9626 */ 9627 if (svntrp == NULL) { 9628 svntrp = kmem_cache_alloc(svntr_cache, KM_NOSLEEP); 9629 if (svntrp == NULL) { 9630 mutex_exit(&svntr_hashtab[hash].tr_lock); 9631 svd->tr_state = SEGVN_TR_OFF; 9632 SEGVN_TR_ADDSTAT(nokmem); 9633 return; 9634 } 9635 #ifdef DEBUG 9636 { 9637 lgrp_id_t i; 9638 for (i = 0; i < NLGRPS_MAX; i++) { 9639 ASSERT(svntrp->tr_amp[i] == NULL); 9640 } 9641 } 9642 #endif /* DEBUG */ 9643 svntrp->tr_vp = vp; 9644 svntrp->tr_off = off; 9645 svntrp->tr_eoff = eoff; 9646 svntrp->tr_szc = szc; 9647 svntrp->tr_valid = 1; 9648 svntrp->tr_mtime = va.va_mtime; 9649 svntrp->tr_ctime = va.va_ctime; 9650 svntrp->tr_refcnt = 0; 9651 svntrp->tr_next = svntr_hashtab[hash].tr_head; 9652 svntr_hashtab[hash].tr_head = svntrp; 9653 } 9654 first = 1; 9655 again: 9656 /* 9657 * We want to pick a replica with pages on main thread's (t_tid = 1, 9658 * aka T1) lgrp. Currently text replication is only optimized for 9659 * workloads that either have all threads of a process on the same 9660 * lgrp or execute their large text primarily on main thread. 9661 */ 9662 lgrp_id = p->p_t1_lgrpid; 9663 if (lgrp_id == LGRP_NONE) { 9664 /* 9665 * In case exec() prefaults text on non main thread use 9666 * current thread lgrpid. It will become main thread anyway 9667 * soon. 9668 */ 9669 lgrp_id = lgrp_home_id(curthread); 9670 } 9671 /* 9672 * Set p_tr_lgrpid to lgrpid if it hasn't been set yet. 
Otherwise 9673 * just set it to NLGRPS_MAX if it's different from current process T1 9674 * home lgrp. p_tr_lgrpid is used to detect if process uses text 9675 * replication and T1 new home is different from lgrp used for text 9676 * replication. When this happens asyncronous segvn thread rechecks if 9677 * segments should change lgrps used for text replication. If we fail 9678 * to set p_tr_lgrpid with cas32 then set it to NLGRPS_MAX without cas 9679 * if it's not already NLGRPS_MAX and not equal lgrp_id we want to 9680 * use. We don't need to use cas in this case because another thread 9681 * that races in between our non atomic check and set may only change 9682 * p_tr_lgrpid to NLGRPS_MAX at this point. 9683 */ 9684 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX); 9685 olid = p->p_tr_lgrpid; 9686 if (lgrp_id != olid && olid != NLGRPS_MAX) { 9687 lgrp_id_t nlid = (olid == LGRP_NONE) ? lgrp_id : NLGRPS_MAX; 9688 if (cas32((uint32_t *)&p->p_tr_lgrpid, olid, nlid) != olid) { 9689 olid = p->p_tr_lgrpid; 9690 ASSERT(olid != LGRP_NONE); 9691 if (olid != lgrp_id && olid != NLGRPS_MAX) { 9692 p->p_tr_lgrpid = NLGRPS_MAX; 9693 } 9694 } 9695 ASSERT(p->p_tr_lgrpid != LGRP_NONE); 9696 membar_producer(); 9697 /* 9698 * lgrp_move_thread() won't schedule async recheck after 9699 * p->p_t1_lgrpid update unless p->p_tr_lgrpid is not 9700 * LGRP_NONE. Recheck p_t1_lgrpid once now that p->p_tr_lgrpid 9701 * is not LGRP_NONE. 9702 */ 9703 if (first && p->p_t1_lgrpid != LGRP_NONE && 9704 p->p_t1_lgrpid != lgrp_id) { 9705 first = 0; 9706 goto again; 9707 } 9708 } 9709 /* 9710 * If no amp was created yet for lgrp_id create a new one as long as 9711 * we have enough memory to afford it. 9712 */ 9713 if ((amp = svntrp->tr_amp[lgrp_id]) == NULL) { 9714 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size); 9715 if (trmem > segvn_textrepl_max_bytes) { 9716 SEGVN_TR_ADDSTAT(normem); 9717 goto fail; 9718 } 9719 if (anon_try_resv_zone(size, NULL) == 0) { 9720 SEGVN_TR_ADDSTAT(noanon); 9721 goto fail; 9722 } 9723 amp = anonmap_alloc(size, size, ANON_NOSLEEP); 9724 if (amp == NULL) { 9725 anon_unresv_zone(size, NULL); 9726 SEGVN_TR_ADDSTAT(nokmem); 9727 goto fail; 9728 } 9729 ASSERT(amp->refcnt == 1); 9730 amp->a_szc = szc; 9731 svntrp->tr_amp[lgrp_id] = amp; 9732 SEGVN_TR_ADDSTAT(newamp); 9733 } 9734 svntrp->tr_refcnt++; 9735 ASSERT(svd->svn_trnext == NULL); 9736 ASSERT(svd->svn_trprev == NULL); 9737 svd->svn_trnext = svntrp->tr_svnhead; 9738 svd->svn_trprev = NULL; 9739 if (svntrp->tr_svnhead != NULL) { 9740 svntrp->tr_svnhead->svn_trprev = svd; 9741 } 9742 svntrp->tr_svnhead = svd; 9743 ASSERT(amp->a_szc == szc && amp->size == size && amp->swresv == size); 9744 ASSERT(amp->refcnt >= 1); 9745 svd->amp = amp; 9746 svd->anon_index = 0; 9747 svd->tr_policy_info.mem_policy = LGRP_MEM_POLICY_NEXT_SEG; 9748 svd->tr_policy_info.mem_lgrpid = lgrp_id; 9749 svd->tr_state = SEGVN_TR_ON; 9750 mutex_exit(&svntr_hashtab[hash].tr_lock); 9751 SEGVN_TR_ADDSTAT(repl); 9752 return; 9753 fail: 9754 ASSERT(segvn_textrepl_bytes >= size); 9755 atomic_add_long(&segvn_textrepl_bytes, -size); 9756 ASSERT(svntrp != NULL); 9757 ASSERT(svntrp->tr_amp[lgrp_id] == NULL); 9758 if (svntrp->tr_refcnt == 0) { 9759 ASSERT(svntrp == svntr_hashtab[hash].tr_head); 9760 svntr_hashtab[hash].tr_head = svntrp->tr_next; 9761 mutex_exit(&svntr_hashtab[hash].tr_lock); 9762 kmem_cache_free(svntr_cache, svntrp); 9763 } else { 9764 mutex_exit(&svntr_hashtab[hash].tr_lock); 9765 } 9766 svd->tr_state = SEGVN_TR_OFF; 9767 } 9768 9769 /* 9770 * Convert 

/*
 * Convert the segment back to a regular vnode mapping segment by unbinding it
 * from its text replication amp. This routine is most typically called when
 * the segment is unmapped, but can also be called when the segment no longer
 * qualifies for text replication (e.g. due to protection changes). If
 * unload_unmap is set, use the HAT_UNLOAD_UNMAP flag in
 * hat_unload_callback(). If we are the last user of the svntr entry, free all
 * its anon maps and remove it from the hash table.
 */
static void
segvn_textunrepl(struct seg *seg, int unload_unmap)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	vnode_t *vp = svd->vp;
	u_offset_t off = svd->offset;
	size_t size = seg->s_size;
	u_offset_t eoff = off + size;
	uint_t szc = seg->s_szc;
	ulong_t hash = SVNTR_HASH_FUNC(vp);
	svntr_t *svntrp;
	svntr_t **prv_svntrp;
	lgrp_id_t lgrp_id = svd->tr_policy_info.mem_lgrpid;
	lgrp_id_t i;

	ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
	ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
	    SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
	ASSERT(svd->tr_state == SEGVN_TR_ON);
	ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
	ASSERT(svd->amp != NULL);
	ASSERT(svd->amp->refcnt >= 1);
	ASSERT(svd->anon_index == 0);
	ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
	ASSERT(svntr_hashtab != NULL);

	mutex_enter(&svntr_hashtab[hash].tr_lock);
	prv_svntrp = &svntr_hashtab[hash].tr_head;
	for (; (svntrp = *prv_svntrp) != NULL; prv_svntrp = &svntrp->tr_next) {
		ASSERT(svntrp->tr_refcnt != 0);
		if (svntrp->tr_vp == vp && svntrp->tr_off == off &&
		    svntrp->tr_eoff == eoff && svntrp->tr_szc == szc) {
			break;
		}
	}
	if (svntrp == NULL) {
		panic("segvn_textunrepl: svntr record not found");
	}
	if (svntrp->tr_amp[lgrp_id] != svd->amp) {
		panic("segvn_textunrepl: amp mismatch");
	}
	svd->tr_state = SEGVN_TR_OFF;
	svd->amp = NULL;
	if (svd->svn_trprev == NULL) {
		ASSERT(svntrp->tr_svnhead == svd);
		svntrp->tr_svnhead = svd->svn_trnext;
		if (svntrp->tr_svnhead != NULL) {
			svntrp->tr_svnhead->svn_trprev = NULL;
		}
		svd->svn_trnext = NULL;
	} else {
		svd->svn_trprev->svn_trnext = svd->svn_trnext;
		if (svd->svn_trnext != NULL) {
			svd->svn_trnext->svn_trprev = svd->svn_trprev;
			svd->svn_trnext = NULL;
		}
		svd->svn_trprev = NULL;
	}
	if (--svntrp->tr_refcnt) {
		mutex_exit(&svntr_hashtab[hash].tr_lock);
		goto done;
	}
	*prv_svntrp = svntrp->tr_next;
	mutex_exit(&svntr_hashtab[hash].tr_lock);
	for (i = 0; i < NLGRPS_MAX; i++) {
		struct anon_map *amp = svntrp->tr_amp[i];
		if (amp == NULL) {
			continue;
		}
		ASSERT(amp->refcnt == 1);
		ASSERT(amp->swresv == size);
		ASSERT(amp->size == size);
		ASSERT(amp->a_szc == szc);
		if (amp->a_szc != 0) {
			anon_free_pages(amp->ahp, 0, size, szc);
		} else {
			anon_free(amp->ahp, 0, size);
		}
		svntrp->tr_amp[i] = NULL;
		ASSERT(segvn_textrepl_bytes >= size);
		atomic_add_long(&segvn_textrepl_bytes, -size);
		anon_unresv_zone(amp->swresv, NULL);
		amp->refcnt = 0;
		anonmap_free(amp);
	}
	kmem_cache_free(svntr_cache, svntrp);
done:
	hat_unload_callback(seg->s_as->a_hat, seg->s_base, size,
	    unload_unmap ? HAT_UNLOAD_UNMAP : 0, NULL);
}
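
/*
 * Editorial illustration (not part of the kernel build): the lookup loop in
 * segvn_textunrepl() above uses the pointer-to-pointer idiom so the matching
 * hash chain node can be unlinked without a separate "prev" pointer or a
 * head-of-list special case. A minimal stand-alone sketch of the same idiom
 * follows; the node type and key field are hypothetical.
 */
#if 0	/* example only */
struct node {
	struct node	*next;
	int		key;
};

static struct node *
unlink_node(struct node **headp, int key)
{
	struct node **pp;
	struct node *np;

	for (pp = headp; (np = *pp) != NULL; pp = &np->next) {
		if (np->key == key) {
			*pp = np->next;	/* works for head and interior nodes */
			np->next = NULL;
			return (np);
		}
	}
	return (NULL);
}
#endif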

/*
 * This is called when a MAP_SHARED writable mapping is created to a vnode
 * that is currently used for execution (the VVMEXEC flag is set). In this
 * case we need to prevent further use of the existing replicas.
 */
static void
segvn_inval_trcache(vnode_t *vp)
{
	ulong_t hash = SVNTR_HASH_FUNC(vp);
	svntr_t *svntrp;

	ASSERT(vp->v_flag & VVMEXEC);

	if (svntr_hashtab == NULL) {
		return;
	}

	mutex_enter(&svntr_hashtab[hash].tr_lock);
	svntrp = svntr_hashtab[hash].tr_head;
	for (; svntrp != NULL; svntrp = svntrp->tr_next) {
		ASSERT(svntrp->tr_refcnt != 0);
		if (svntrp->tr_vp == vp && svntrp->tr_valid) {
			svntrp->tr_valid = 0;
		}
	}
	mutex_exit(&svntr_hashtab[hash].tr_lock);
}

/*
 * Asynchronous text replication update thread. It arms the periodic
 * segvn_trupdate_wakeup() timeout and then loops (CPR-safely) waiting on
 * segvn_trasync_sem; every time it is woken it calls segvn_trupdate() to
 * recheck which lgroups the replicated segments should be using.
 */
static void
segvn_trasync_thread(void)
{
	callb_cpr_t cpr_info;
	kmutex_t cpr_lock;	/* just for CPR stuff */

	mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);

	CALLB_CPR_INIT(&cpr_info, &cpr_lock,
	    callb_generic_cpr, "segvn_async");

	if (segvn_update_textrepl_interval == 0) {
		segvn_update_textrepl_interval = segvn_update_tr_time * hz;
	} else {
		segvn_update_textrepl_interval *= hz;
	}
	(void) timeout(segvn_trupdate_wakeup, NULL,
	    segvn_update_textrepl_interval);

	for (;;) {
		mutex_enter(&cpr_lock);
		CALLB_CPR_SAFE_BEGIN(&cpr_info);
		mutex_exit(&cpr_lock);
		sema_p(&segvn_trasync_sem);
		mutex_enter(&cpr_lock);
		CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
		mutex_exit(&cpr_lock);
		segvn_trupdate();
	}
}

static uint64_t segvn_lgrp_trthr_migrs_snpsht = 0;

/*
 * Periodic timeout() callback. Wake up the async thread only if the count of
 * thread migrations between lgroups has changed since the previous tick, and
 * re-arm the timeout while replication updates remain enabled.
 */
static void
segvn_trupdate_wakeup(void *dummy)
{
	uint64_t cur_lgrp_trthr_migrs = lgrp_get_trthr_migrations();

	if (cur_lgrp_trthr_migrs != segvn_lgrp_trthr_migrs_snpsht) {
		segvn_lgrp_trthr_migrs_snpsht = cur_lgrp_trthr_migrs;
		sema_v(&segvn_trasync_sem);
	}

	if (!segvn_disable_textrepl_update &&
	    segvn_update_textrepl_interval != 0) {
		(void) timeout(segvn_trupdate_wakeup, dummy,
		    segvn_update_textrepl_interval);
	}
}

/*
 * Walk all svntr hash buckets and all segments attached to each replication
 * entry, letting segvn_trupdate_seg() decide whether a segment should switch
 * to a replica on a different lgroup.
 */
static void
segvn_trupdate(void)
{
	ulong_t hash;
	svntr_t *svntrp;
	segvn_data_t *svd;

	ASSERT(svntr_hashtab != NULL);

	for (hash = 0; hash < svntr_hashtab_sz; hash++) {
		mutex_enter(&svntr_hashtab[hash].tr_lock);
		svntrp = svntr_hashtab[hash].tr_head;
		for (; svntrp != NULL; svntrp = svntrp->tr_next) {
			ASSERT(svntrp->tr_refcnt != 0);
			svd = svntrp->tr_svnhead;
			for (; svd != NULL; svd = svd->svn_trnext) {
				segvn_trupdate_seg(svd->seg, svd, svntrp,
				    hash);
			}
		}
		mutex_exit(&svntr_hashtab[hash].tr_lock);
	}
}
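
/*
 * Recheck a single text replication segment on behalf of segvn_trupdate().
 * Called with the svntr bucket lock held. If the owning process now runs its
 * main thread on a different lgroup than the one whose replica this segment
 * currently uses, switch the segment to the replica amp for the new home
 * lgroup (creating it if necessary) and unload the old translations so that
 * subsequent faults are satisfied from the new replica. The as and segment
 * locks are taken with tryenter; if they are contended we skip this segment
 * and let a later pass retry.
 */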
static void
segvn_trupdate_seg(struct seg *seg,
    segvn_data_t *svd,
    svntr_t *svntrp,
    ulong_t hash)
{
	proc_t *p;
	lgrp_id_t lgrp_id;
	struct as *as;
	size_t size;
	struct anon_map *amp;

	ASSERT(svd->vp != NULL);
	ASSERT(svd->vp == svntrp->tr_vp);
	ASSERT(svd->offset == svntrp->tr_off);
	ASSERT(svd->offset + seg->s_size == svntrp->tr_eoff);
	ASSERT(seg != NULL);
	ASSERT(svd->seg == seg);
	ASSERT(seg->s_data == (void *)svd);
	ASSERT(seg->s_szc == svntrp->tr_szc);
	ASSERT(svd->tr_state == SEGVN_TR_ON);
	ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
	ASSERT(svd->amp != NULL);
	ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
	ASSERT(svd->tr_policy_info.mem_lgrpid != LGRP_NONE);
	ASSERT(svd->tr_policy_info.mem_lgrpid < NLGRPS_MAX);
	ASSERT(svntrp->tr_amp[svd->tr_policy_info.mem_lgrpid] == svd->amp);
	ASSERT(svntrp->tr_refcnt != 0);
	ASSERT(mutex_owned(&svntr_hashtab[hash].tr_lock));

	as = seg->s_as;
	ASSERT(as != NULL && as != &kas);
	p = as->a_proc;
	ASSERT(p != NULL);
	ASSERT(p->p_tr_lgrpid != LGRP_NONE);
	lgrp_id = p->p_t1_lgrpid;
	if (lgrp_id == LGRP_NONE) {
		return;
	}
	ASSERT(lgrp_id < NLGRPS_MAX);
	if (svd->tr_policy_info.mem_lgrpid == lgrp_id) {
		return;
	}

	/*
	 * Use tryenter locking since we acquire the as/seg locks and the
	 * svntr hash lock in the reverse order from the synchronous thread
	 * path.
	 */
	if (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_READER)) {
		SEGVN_TR_ADDSTAT(nolock);
		if (segvn_lgrp_trthr_migrs_snpsht) {
			segvn_lgrp_trthr_migrs_snpsht = 0;
		}
		return;
	}
	if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) {
		AS_LOCK_EXIT(as, &as->a_lock);
		SEGVN_TR_ADDSTAT(nolock);
		if (segvn_lgrp_trthr_migrs_snpsht) {
			segvn_lgrp_trthr_migrs_snpsht = 0;
		}
		return;
	}
	size = seg->s_size;
	if (svntrp->tr_amp[lgrp_id] == NULL) {
		size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
		if (trmem > segvn_textrepl_max_bytes) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			AS_LOCK_EXIT(as, &as->a_lock);
			atomic_add_long(&segvn_textrepl_bytes, -size);
			SEGVN_TR_ADDSTAT(normem);
			return;
		}
		if (anon_try_resv_zone(size, NULL) == 0) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			AS_LOCK_EXIT(as, &as->a_lock);
			atomic_add_long(&segvn_textrepl_bytes, -size);
			SEGVN_TR_ADDSTAT(noanon);
			return;
		}
		amp = anonmap_alloc(size, size, KM_NOSLEEP);
		if (amp == NULL) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			AS_LOCK_EXIT(as, &as->a_lock);
			atomic_add_long(&segvn_textrepl_bytes, -size);
			anon_unresv_zone(size, NULL);
			SEGVN_TR_ADDSTAT(nokmem);
			return;
		}
		ASSERT(amp->refcnt == 1);
		amp->a_szc = seg->s_szc;
		svntrp->tr_amp[lgrp_id] = amp;
	}
	/*
	 * We don't need to drop the bucket lock, but here we give other
	 * threads a chance. svntr and svd can't be unlinked as long as the
	 * segment lock is held as a writer and the AS lock is held as well.
	 * After we retake the bucket lock we'll continue from where we left
	 * off. We'll still be able to reach the end of either list, since
	 * new entries are always added at the beginning of the lists.
	 */
	mutex_exit(&svntr_hashtab[hash].tr_lock);
	hat_unload_callback(as->a_hat, seg->s_base, size, 0, NULL);
	mutex_enter(&svntr_hashtab[hash].tr_lock);

	ASSERT(svd->tr_state == SEGVN_TR_ON);
	ASSERT(svd->amp != NULL);
	ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
	ASSERT(svd->tr_policy_info.mem_lgrpid != lgrp_id);
	ASSERT(svd->amp != svntrp->tr_amp[lgrp_id]);

	svd->tr_policy_info.mem_lgrpid = lgrp_id;
	svd->amp = svntrp->tr_amp[lgrp_id];
	p->p_tr_lgrpid = NLGRPS_MAX;
	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	AS_LOCK_EXIT(as, &as->a_lock);

	ASSERT(svntrp->tr_refcnt != 0);
	ASSERT(svd->vp == svntrp->tr_vp);
	ASSERT(svd->tr_policy_info.mem_lgrpid == lgrp_id);
	ASSERT(svd->amp != NULL && svd->amp == svntrp->tr_amp[lgrp_id]);
	ASSERT(svd->seg == seg);
	ASSERT(svd->tr_state == SEGVN_TR_ON);

	SEGVN_TR_ADDSTAT(asyncrepl);
}
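
/*
 * Editorial illustration (not part of the kernel build): segvn_trupdate_seg()
 * above takes the address space and segment locks with tryenter while already
 * holding the svntr bucket lock, i.e. in the opposite order from the
 * synchronous fault path, and simply backs off when they are contended. A
 * minimal stand-alone sketch of that "reverse order, therefore trylock and
 * give up" pattern follows; the lock names are hypothetical and pthreads are
 * used purely for illustration.
 */
#if 0	/* example only */
#include <pthread.h>

static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t seg_lock = PTHREAD_MUTEX_INITIALIZER;

/* Synchronous path: seg_lock first, then bucket_lock. */
static void
sync_path(void)
{
	(void) pthread_mutex_lock(&seg_lock);
	(void) pthread_mutex_lock(&bucket_lock);
	/* ... do the work ... */
	(void) pthread_mutex_unlock(&bucket_lock);
	(void) pthread_mutex_unlock(&seg_lock);
}

/* Async path: already holds bucket_lock, so it must not block on seg_lock. */
static void
async_path_bucket_held(void)
{
	if (pthread_mutex_trylock(&seg_lock) != 0) {
		return;		/* contended: skip this round, retry later */
	}
	/* ... do the work ... */
	(void) pthread_mutex_unlock(&seg_lock);
}
#endif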